source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
GB_unop__lnot_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_int32_int32
// op(A') function: GB_unop_tran__lnot_int32_int32
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Apply the logical-NOT unary op elementwise: Cx [p] = !(Ax [p] != 0),
// for anz entries, using up to nthreads OpenMP threads. When A is bitmap,
// Ab selects which entries are present; otherwise Ab is NULL.
GrB_Info GB_unop_apply__lnot_int32_int32
(
int32_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// this kernel was compiled out via GxB_NO_* controls; caller falls back
// to the generic case
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// A is not bitmap: all anz entries are present
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int32_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = !(z != 0) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!Ab [p]) continue ;
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = !(z != 0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose and apply LNOT. All of the work is done by
// the included template file, specialized via the GB_CAST_OP macro above.
GrB_Info GB_unop_tran__lnot_int32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// compiled out via GxB_NO_* controls; caller uses the generic case
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cpu_ctc.h | #pragma once
#include <tuple>
#include <cmath>
#include <limits>
#include <algorithm>
#include <numeric>
#if !defined(CTC_DISABLE_OMP) && !defined(APPLE)
#include <omp.h>
#endif
#include "ctc_helper.h"
// CPU implementation of the Connectionist Temporal Classification (CTC)
// loss: computes per-utterance costs and, optionally, gradients w.r.t. the
// unnormalized final-layer activations. All scratch memory is carved out of
// the caller-supplied workspace buffer; this class allocates nothing itself.
template<typename ProbT>
class CpuCTC {
public:
// Noncopyable
// alphabet_size: number of characters including the blank
// minibatch:     number of utterances per call
// workspace:     caller-allocated scratch buffer (softmax + per-utterance data)
// num_threads:   OpenMP thread count; <= 0 means use omp_get_max_threads()
CpuCTC(int alphabet_size, int minibatch, void* workspace, int num_threads) :
alphabet_size_(alphabet_size), minibatch_(minibatch),
num_threads_(num_threads), workspace_(workspace) {
#if defined(CTC_DISABLE_OMP) || defined(APPLE)
#else
if (num_threads > 0) {
omp_set_num_threads(num_threads);
} else {
// non-positive request: take whatever OpenMP offers
num_threads_ = omp_get_max_threads();
}
#endif
};
CpuCTC(const CpuCTC&) = delete;
CpuCTC& operator=(const CpuCTC&) = delete;
// Fills costs[mb] and accumulates gradients into grads for every utterance.
// Returns CTC_STATUS_INVALID_VALUE if any required pointer is null.
ctcStatus_t cost_and_grad(const ProbT* const activations,
ProbT *grads,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
// Forward-only scoring: fills costs without computing gradients.
ctcStatus_t score_forward(const ProbT* const activations,
ProbT* costs,
const int* const flat_labels,
const int* const label_lengths,
const int* const input_lengths);
private:
// Per-utterance views into the shared workspace plus label metadata used
// by the alpha/beta recursions.
class CpuCTC_metadata {
private:
// Builds labels_w_blanks / s_inc / e_inc; returns the count of adjacent
// repeated labels.
int setup_labels(const int* const labels, int L, int S);
public:
CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size,
void* workspace, size_t bytes_used,
const int* const labels);
ProbT* alphas;          // S x T forward lattice, log domain
ProbT* betas;           // single beta column of length S, log domain
int* labels_w_blanks;   // blank-interleaved label sequence, length S
int* e_inc;             // end-of-window increment table
int* s_inc;             // start-of-window increment table
ProbT* output;          // per-character accumulator, length alphabet_size
int repeats;            // number of adjacent repeated labels
};
int alphabet_size_; // Number of characters plus blank
int minibatch_;
int num_threads_;
void* workspace_;
// Numerically stable softmax over the alphabet dimension, one column per
// (utterance, time step).
void softmax(const ProbT* const activations, ProbT* probs,
const int* const input_lengths);
// Returns (cost, over_threshold) for one utterance.
std::tuple<ProbT, bool>
cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
const int* const labels, int T, int L,
int mb, size_t bytes_used);
ProbT compute_alphas(const ProbT* probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas);
ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output);
};
template<typename ProbT>
CpuCTC<ProbT>::CpuCTC_metadata::CpuCTC_metadata(int L, int S, int T, int mb,
                                                int alphabet_size,
                                                void* workspace, size_t bytes_used,
                                                const int* const labels) {
    // Carve this utterance's scratch arrays out of the shared workspace,
    // starting at the caller-provided byte offset.
    char* base = static_cast<char*>(workspace);

    alphas = reinterpret_cast<ProbT*>(base + bytes_used);
    bytes_used += sizeof(ProbT) * S * T;
    std::fill(alphas, alphas + S * T, ctc_helper::neg_inf<ProbT>());

    betas = reinterpret_cast<ProbT*>(base + bytes_used);
    bytes_used += sizeof(ProbT) * S;
    std::fill(betas, betas + S, ctc_helper::neg_inf<ProbT>());

    labels_w_blanks = reinterpret_cast<int*>(base + bytes_used);
    bytes_used += sizeof(int) * S;
    e_inc = reinterpret_cast<int*>(base + bytes_used);
    bytes_used += sizeof(int) * S;
    s_inc = reinterpret_cast<int*>(base + bytes_used);
    bytes_used += sizeof(int) * S;

    output = reinterpret_cast<ProbT*>(base + bytes_used);
    bytes_used += sizeof(ProbT) * alphabet_size;

    // Interleave blanks and build the window-increment tables.
    repeats = setup_labels(labels, L, S);
}
template<typename ProbT>
int CpuCTC<ProbT>::CpuCTC_metadata::setup_labels(const int* const labels,
                                                 int L, int S) {
    // Build the blank-interleaved label sequence and the start/end increment
    // tables used to advance the active window over the CTC lattice.
    int n_s = 0;
    int n_e = 0;
    int repeats = 0;

    s_inc[n_s++] = 1;
    for (int i = 1; i < L; ++i) {
        if (labels[i - 1] == labels[i]) {
            // A repeated label forces a mandatory blank between the copies,
            // so each table advances in two steps of one.
            s_inc[n_s++] = 1;
            s_inc[n_s++] = 1;
            e_inc[n_e++] = 1;
            e_inc[n_e++] = 1;
            ++repeats;
        } else {
            s_inc[n_s++] = 2;
            e_inc[n_e++] = 2;
        }
    }
    e_inc[n_e++] = 1;

    // Interleave: blank, label[0], blank, label[1], ..., blank.
    for (int i = 0; i < L; ++i) {
        labels_w_blanks[2 * i] = ctc_helper::BLANK;
        labels_w_blanks[2 * i + 1] = labels[i];
    }
    labels_w_blanks[S - 1] = ctc_helper::BLANK;

    return repeats;
}
template<typename ProbT>
void
CpuCTC<ProbT>::softmax(const ProbT* const activations, ProbT* probs,
                       const int* const input_lengths) {
    // Numerically stable softmax over the alphabet dimension, one column per
    // (utterance, time step). Column (mb, t) starts at
    // (mb + minibatch_ * t) * alphabet_size_.
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        for (int t = 0; t < input_lengths[mb]; ++t) {
            const int base = (mb + minibatch_ * t) * alphabet_size_;

            // Subtract the column max before exponentiating to avoid overflow.
            ProbT peak = -std::numeric_limits<ProbT>::infinity();
            for (int r = 0; r < alphabet_size_; ++r)
                peak = std::max(peak, activations[base + r]);

            ProbT total = ProbT(0.);
            for (int r = 0; r < alphabet_size_; ++r)
                total += std::exp(activations[base + r] - peak);

            for (int r = 0; r < alphabet_size_; ++r)
                probs[base + r] = std::exp(activations[base + r] - peak) / total;
        }
    }
}
template<typename ProbT>
std::tuple<ProbT, bool>
CpuCTC<ProbT>::cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
                                    const int* const labels,
                                    int T, int L, int mb, size_t bytes_used) {
    const int S = 2*L + 1; // label sequence length with interleaved blanks

    CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used, labels);

    // A valid alignment needs one frame per label plus one per mandatory
    // blank between repeated labels.
    if (L + ctcm.repeats > T) {
        return std::make_tuple(ProbT(0), false); // TODO, not right to return 0
    }

    ProbT llForward = compute_alphas(probs, ctcm.repeats, S, T, ctcm.e_inc,
                                     ctcm.s_inc, ctcm.labels_w_blanks,
                                     ctcm.alphas);

    ProbT llBackward = compute_betas_and_grad(grad, probs, llForward, ctcm.repeats,
                                              S, T, ctcm.e_inc, ctcm.s_inc,
                                              ctcm.labels_w_blanks,
                                              ctcm.alphas,
                                              ctcm.betas,
                                              ctcm.output);

    // Forward and backward log-likelihoods should agree; a large gap flags
    // numerical trouble for this utterance.
    const bool over_threshold = std::abs(llForward - llBackward) > ctc_helper::threshold;

    return std::make_tuple(-llForward, over_threshold);
}
// Computes forward probabilities
// Log-space forward (alpha) recursion over the blank-augmented label
// sequence of length S across T time steps. probs holds this utterance's
// softmax columns, strided by alphabet_size_ * minibatch_ per time step.
// Returns the total forward log-likelihood.
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_alphas(const ProbT* probs, int repeats, int S, int T,
const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas) {
// [start, end) is the window of label positions reachable at the current
// time step; it is advanced using the precomputed s_inc / e_inc tables.
int start = (((S /2) + repeats - T) < 0) ? 0 : 1,
end = S > 1 ? 2 : 1;
// t = 0: seed the first alpha column (log domain).
for (int i = start; i < end; ++i) {
alphas[i] = std::log(probs[labels[i]]);
}
for(int t = 1; t < T; ++t) {
// remain >= 0: the remaining frames are only just enough to emit the
// rest of the sequence, so the window's start must advance.
int remain = (S / 2) + repeats - (T - t);
if(remain >= 0)
start += s_inc[remain];
if(t <= (S / 2) + repeats)
end += e_inc[t - 1];
int startloop = start;
int idx1 = t * S, idx2 = (t - 1) * S, idx3 = t * (alphabet_size_ * minibatch_);
// Position 0 (leading blank) has no left neighbor: only a self-transition.
if (start == 0) {
alphas[idx1] = alphas[idx2] + std::log(probs[ctc_helper::BLANK + idx3]);
startloop += 1;
}
for(int i = startloop; i < end; ++i) {
ProbT prev_sum = ctc_helper::log_plus<ProbT>()(alphas[i + idx2], alphas[(i-1) + idx2]);
// Skip two if not on blank and not on repeat.
if (labels[i] != ctc_helper::BLANK && i != 1 && labels[i] != labels[i-2])
prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum, alphas[(i-2) + idx2]);
alphas[i + idx1] = prev_sum + std::log(probs[labels[i] + idx3]);
}
}
// Total log-likelihood: log-sum over the final column's active window.
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, alphas[i + (T - 1) * S]);
}
return loglike;
}
// Starting from T, we sweep backward over the alpha array computing one column
// of betas as we go. At each position we can update product alpha * beta and then
// sum into the gradient associated with each label.
// NOTE computes gradient w.r.t UNNORMALIZED final layer activations.
// Assumed passed in grads are already zeroed!
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
ProbT log_partition, int repeats,
int S, int T, const int* const e_inc,
const int* const s_inc,
const int* const labels,
ProbT* alphas,
ProbT* betas,
ProbT* output) {
// [start, end) is the reachable-position window, shrunk backward with the
// same s_inc / e_inc tables the forward pass grew it with.
int start = S > 1 ? (S - 2) : 0,
end = (T > (S / 2) + repeats) ? S : S-1;
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
//set the starting values in the beta column at the very right edge
for (int i = start; i < end; ++i) {
betas[i] = std::log(probs[labels[i] + (T - 1) * (alphabet_size_ * minibatch_)]);
//compute alpha * beta in log space at this position in (S, T) space
alphas[i + (T - 1) * S] += betas[i];
//update the gradient associated with this label
//essentially performing a reduce-by-key in a sequential manner
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + (T - 1) * S], output[labels[i]]);
}
//update the gradient wrt to each unique label
for (int i = 0; i < alphabet_size_; ++i) {
int idx3 = (T - 1) * alphabet_size_ * minibatch_ + i;
// guard against log(0): with no probability mass on this character the
// gradient reduces to the softmax output itself
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
probs[idx3] == 0.0) {
grad[idx3] = probs[idx3];
} else {
grad[idx3] = probs[idx3] - std::exp(output[i] -
std::log(probs[idx3]) - log_partition);
}
}
//loop from the second to last column all the way to the left
for(int t = T - 2; t >= 0; --t) {
int remain = (S / 2) + repeats - (T - t);
if(remain >= -1)
start -= s_inc[remain + 1];
if(t < (S / 2) + repeats)
end -= e_inc[t];
// the last position (trailing blank) has no right neighbor; it is
// handled separately after this loop when end == S
int endloop = end == S ? end - 1 : end;
int idx1 = t * S, idx3 = t * (alphabet_size_ * minibatch_);
std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
for(int i = start; i < endloop; ++i) {
ProbT next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]);
// Skip two if not on blank and not on repeat.
if (labels[i] != ctc_helper::BLANK && i != (S-2) && labels[i] != labels[i+2]){
next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]);
}
betas[i] = next_sum + std::log(probs[labels[i] + idx3]);
//compute alpha * beta in log space
alphas[i + idx1] += betas[i];
//update the gradient associated with this label
output[labels[i]] =
ctc_helper::log_plus<ProbT>()(alphas[i + idx1], output[labels[i]]);
}
if (end == S) {
betas[(S-1)] = betas[(S-1)] + std::log(probs[ctc_helper::BLANK + idx3]);
alphas[(S-1) + idx1] += betas[(S-1)];
output[labels[S-1]] =
ctc_helper::log_plus<ProbT>()(alphas[S-1 + idx1], output[labels[S-1]]);
}
//go over the unique labels and compute the final grad
// wrt to each one at this time step
for (int i = 0; i < alphabet_size_; ++i) {
if (output[i] == 0.0 || output[i] == ctc_helper::neg_inf<ProbT>() ||
probs[idx3] == 0.0) {
grad[idx3] = probs[idx3];
} else {
grad[idx3] = probs[idx3] - std::exp(output[i] -
std::log(probs[idx3]) - log_partition);
}
++idx3;
}
}
// Backward log-likelihood over the first column's window; should match the
// forward pass up to numerical error (checked by the caller).
ProbT loglike = ctc_helper::neg_inf<ProbT>();
for(int i = start; i < end; ++i) {
loglike = ctc_helper::log_plus<ProbT>()(loglike, betas[i]);
}
return loglike;
}
template<typename ProbT>
ctcStatus_t
CpuCTC<ProbT>::cost_and_grad(const ProbT* const activations,
                             ProbT *grads,
                             ProbT *costs,
                             const int* const flat_labels,
                             const int* const label_lengths,
                             const int* const input_lengths) {
    // Computes costs[mb] = -log p(labels | activations) and accumulates
    // gradients into grads for each utterance. Returns
    // CTC_STATUS_INVALID_VALUE if any required pointer is null.
    if (activations == nullptr ||
        grads == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
        )
        return CTC_STATUS_INVALID_VALUE;

    // Front of the workspace holds the softmax probabilities.
    ProbT* probs = static_cast<ProbT *>(workspace_);

    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;

    // Per-minibatch scratch stride. This must mirror the carving done in
    // CpuCTC_metadata, which uses sizeof(ProbT) — the previous code used
    // sizeof(float) here, which under-counts when ProbT is double and makes
    // per-utterance regions overlap (a data race under the omp loop below).
    // NOTE(review): the external workspace-size helper must size the buffer
    // with sizeof(ProbT) as well — confirm against callers.
    size_t per_minibatch_bytes = 0;
    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
    int maxS = 2 * maxL + 1;
    //output
    per_minibatch_bytes += sizeof(ProbT) * alphabet_size_;
    //alphas
    per_minibatch_bytes += sizeof(ProbT) * maxS * maxT;
    //betas
    per_minibatch_bytes += sizeof(ProbT) * maxS;
    //labels w/blanks, e_inc, s_inc
    per_minibatch_bytes += 3 * sizeof(int) * maxS;

    softmax(activations, probs, input_lengths);

#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription

        bool mb_status;
        std::tie(costs[mb], mb_status) =
            cost_and_grad_kernel(grads + mb * alphabet_size_,
                                 probs + mb * alphabet_size_,
                                 flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0),
                                 T, L, mb,
                                 bytes_used + mb * per_minibatch_bytes);
    }

    return CTC_STATUS_SUCCESS;
}
template<typename ProbT>
ctcStatus_t CpuCTC<ProbT>::score_forward(const ProbT* const activations,
                                         ProbT* costs,
                                         const int* const flat_labels,
                                         const int* const label_lengths,
                                         const int* const input_lengths) {
    // Forward-only scoring: fills costs[mb] = -log p(labels | activations)
    // without forming gradients. Returns CTC_STATUS_INVALID_VALUE if any
    // required pointer is null.
    if (activations == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
        )
        return CTC_STATUS_INVALID_VALUE;

    // Front of the workspace holds the softmax probabilities.
    ProbT* probs = static_cast<ProbT *>(workspace_);

    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;

    // Per-minibatch scratch stride. Must mirror CpuCTC_metadata, which carves
    // with sizeof(ProbT) — the previous sizeof(float) made per-utterance
    // regions overlap when ProbT is double (race under the omp loop below).
    // NOTE(review): confirm the external workspace-size helper sizes the
    // buffer with sizeof(ProbT) accordingly.
    size_t per_minibatch_bytes = 0;
    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
    int maxS = 2 * maxL + 1;
    //output
    per_minibatch_bytes += sizeof(ProbT) * alphabet_size_;
    //alphas
    per_minibatch_bytes += sizeof(ProbT) * maxS * maxT;
    //betas
    per_minibatch_bytes += sizeof(ProbT) * maxS;
    //labels w/blanks, e_inc, s_inc
    per_minibatch_bytes += 3 * sizeof(int) * maxS;

    softmax(activations, probs, input_lengths);

#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription
        const int S = 2*L + 1;           // Number of labels with blanks

        CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_,
                             bytes_used + mb * per_minibatch_bytes,
                             flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0));

        if (L + ctcm.repeats > T)
            costs[mb] = ProbT(0); // too few frames to emit all labels
        else {
            costs[mb] = -compute_alphas(probs + mb * alphabet_size_, ctcm.repeats, S, T,
                                        ctcm.e_inc, ctcm.s_inc, ctcm.labels_w_blanks,
                                        ctcm.alphas);
        }
    }

    return CTC_STATUS_SUCCESS;
}
|
sharp_utils.c | #include "sharp_utils.h"
#include <immintrin.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <omp.h>
/* Return the smaller of two ints. */
int min(int a, int b) {
    if (a < b) { return a; }
    return b;
}
/* Return the larger of two ints. */
int max(int a, int b) {
    if (a > b) { return a; }
    return b;
}
/* Accumulate the cross power spectrum cl[l] from two sets of single-precision
 * spherical-harmonic coefficients alm1, alm2: m > 0 terms count both real and
 * imaginary parts, the m = 0 term counts half (its /2 cancels the final *2).
 * NOTE(review): the m-loop indexes ainfo->mvstart[m] directly with
 * m = 1..nm-1, i.e. it assumes mval[mi] == mi (contiguous m values) — the
 * disabled variants below use ainfo->mval[mi]; confirm against callers. */
void alm2cl_sp(sharp_alm_info * ainfo, float * alm1, float * alm2, float * cl) {
int nthread = omp_get_max_threads();
int nl = ainfo->lmax+1;
/* one zero-initialized accumulator row per thread (calloc zero-fills);
 * NOTE(review): allocation failure is not checked */
float * buf = calloc(nthread*nl, sizeof(float));
#pragma omp parallel
{
int id = omp_get_thread_num();
/* m = 0 handled serially by thread 0; the /2 pre-compensates the final *2 */
if(id == 0) {
for(int l = 0; l <= ainfo->lmax; l++) {
ptrdiff_t i = ainfo->mvstart[0]*2 + l*2;
buf[nl*id+l] = alm1[i]*alm2[i]/2;
}
}
/* each thread accumulates into its own row buf[nl*id .. nl*id+nl) */
#pragma omp for schedule(dynamic)
for(int m = 1; m < ainfo->nm; m++) {
for(int l = m; l <= ainfo->lmax; l++) {
ptrdiff_t i = ainfo->mvstart[m]*2 + l*2;
buf[nl*id+l] += alm1[i]*alm2[i] + alm1[i+1]*alm2[i+1];
}
}
/* redundant with the implied barrier at the end of the omp-for above
 * (thread 0 runs its serial m=0 block before joining that loop); kept
 * as an explicit safety fence before the reduction */
#pragma omp barrier
/* reduce the per-thread rows and apply the 2/(2l+1) normalization */
#pragma omp for
for(int l = 0; l < nl; l++) {
cl[l] = 0;
for(int i = 0; i < nthread; i++)
cl[l] += buf[nl*i+l];
cl[l] *= 2.0/(2*l+1);
}
}
free(buf);
}
/* Double-precision twin of alm2cl_sp: accumulate the cross power spectrum
 * cl[l] from alm1, alm2, with m > 0 terms counted twice via the final *2 and
 * the m = 0 term pre-halved. NOTE(review): like the sp version, the m-loop
 * assumes mval[mi] == mi (contiguous m values) — confirm against callers. */
void alm2cl_dp(sharp_alm_info * ainfo, double * alm1, double * alm2, double * cl) {
int nthread = omp_get_max_threads();
int nl = ainfo->lmax+1;
/* one zero-initialized accumulator row per thread (calloc zero-fills);
 * NOTE(review): allocation failure is not checked */
double * buf = calloc(nthread*nl, sizeof(double));
#pragma omp parallel
{
int id = omp_get_thread_num();
/* m = 0 handled serially by thread 0; the /2 pre-compensates the final *2 */
if(id == 0) {
for(int l = 0; l <= ainfo->lmax; l++) {
ptrdiff_t i = ainfo->mvstart[0]*2 + l*2;
buf[nl*id+l] = alm1[i]*alm2[i]/2;
}
}
/* each thread accumulates into its own row buf[nl*id .. nl*id+nl) */
#pragma omp for schedule(dynamic)
for(int m = 1; m < ainfo->nm; m++) {
for(int l = m; l <= ainfo->lmax; l++) {
ptrdiff_t i = ainfo->mvstart[m]*2 + l*2;
buf[nl*id+l] += alm1[i]*alm2[i] + alm1[i+1]*alm2[i+1];
}
}
/* redundant with the implied barrier at the end of the omp-for above;
 * kept as an explicit safety fence before the reduction */
#pragma omp barrier
/* reduce the per-thread rows and apply the 2/(2l+1) normalization */
#pragma omp for
for(int l = 0; l < nl; l++) {
cl[l] = 0;
for(int i = 0; i < nthread; i++)
cl[l] += buf[nl*i+l];
cl[l] *= 2.0/(2*l+1);
}
}
free(buf);
}
#if 0
/* NOTE(review): everything in this #if 0 block is compiled out. The variants
 * are kept as reference implementations and experiments: a plain l-major
 * baseline, an m-major sweep, a threaded m-major version, and an AVX2
 * version. Unlike the active functions above, these use ainfo->mval[mi] to
 * map slot mi to its m value. */
void alm2cl_plain_sp(sharp_alm_info * ainfo, float * alm, float * cl) {
// Straightforward implementation. The baseline we will try to improve on.
// We don't support stride or packing.
for(int l = 0; l <= ainfo->lmax; l++) {
ptrdiff_t i = ainfo->mvstart[0]*2 + l*2;
float c = alm[i]*alm[i];
for(int mi = 1; mi < ainfo->nm; mi++) {
if(ainfo->mval[mi] > l) continue;
i = ainfo->mvstart[mi]*2 + l*2;
c += 2*(alm[i]*alm[i] + alm[i+1]*alm[i+1]);
}
cl[l] = c/(2*l+1);
}
}
/* m-major sweep: initialize with the m = 0 term, then accumulate each m
 * column into all the ls it contributes to. */
void alm2cl_mmajor_sp(sharp_alm_info * ainfo, float * alm, float * cl) {
for(int l = 0; l <= ainfo->lmax; l++) {
ptrdiff_t i = ainfo->mvstart[0]*2 + l*2;
cl[l] = alm[i]*alm[i];
}
for(int mi = 1; mi < ainfo->nm; mi++) {
int m = ainfo->mval[mi];
for(int l = m; l <= ainfo->lmax; l++) {
ptrdiff_t i = ainfo->mvstart[mi]*2 + l*2;
cl[l] += 2*(alm[i]*alm[i] + alm[i+1]*alm[i+1]);
}
}
for(int l = 0; l <= ainfo->lmax; l++) {
cl[l] /= (2*l+1);
}
}
/* Threaded m-major version with per-thread accumulator rows; the shape the
 * active alm2cl_sp/alm2cl_dp functions were derived from. */
void alm2cl_mmajor2_sp(sharp_alm_info * ainfo, float * alm, float * cl) {
int nthread = omp_get_max_threads();
int nl = ainfo->lmax+1;
float * buf = calloc(nthread*nl, sizeof(float));
#pragma omp parallel
{
int id = omp_get_thread_num();
if(id == 0) {
for(int l = 0; l <= ainfo->lmax; l++) {
ptrdiff_t i = ainfo->mvstart[0]*2 + l*2;
buf[nl*id+l] = alm[i]*alm[i];
}
}
#pragma omp for schedule(dynamic)
for(int m = 1; m < ainfo->nm; m++) {
for(int l = m; l <= ainfo->lmax; l++) {
ptrdiff_t i = ainfo->mvstart[m]*2 + l*2;
if(m == 0) buf[nl*id+l] += alm[i]*alm[i];
else buf[nl*id+l] += 2*(alm[i]*alm[i] + alm[i+1]*alm[i+1]);
}
}
}
#pragma omp parallel for
for(int l = 0; l < nl; l++) {
cl[l] = 0;
for(int i = 0; i < nthread; i++)
cl[l] += buf[nl*i+l];
cl[l] /= 2*l+1;
}
free(buf);
}
void alm2cl_avx_sp(sharp_alm_info * ainfo, float * alm, float * cl) {
// AVX verison. Operate on 8 ls at a time. Simple l-major one first.
// In theory this should be safe, as we're working on units larger than a
// cache line. Fast operations require 32-byte alignment, but it's hard to
// ensure that. That means that this won't be much faster than the automatic
// version. I measure it to be 17% faster.
#pragma omp parallel for schedule(dynamic)
for(int l1 = 0; l1 <= ainfo->lmax; l1+=8) {
int l2 = min(l1+8, ainfo->lmax+1);
int nm_block;
if(l2 == l1+8) {
// We have a whole block worth of ls. Handle all our ls up to nm_block
nm_block = min(l1+1, ainfo->nm);
__m256 a1, a2, c = _mm256_setzero_ps(), fact, tmp;
__m256i permutation = _mm256_set_epi32(7,6,3,2,5,4,1,0);
for(int m = 0; m < nm_block; m++) {
/* weight 2 for m > 0 (real+imag both counted), 1 for m = 0 */
fact = _mm256_set1_ps(m?2:1);
// Load our alms
ptrdiff_t i = ainfo->mvstart[m]*2 + l1*2;
a1 = _mm256_loadu_ps(alm + i + 0);
a2 = _mm256_loadu_ps(alm + i + 8);
a1 *= a1;
a2 *= a2;
tmp = _mm256_hadd_ps(a1, a2);
// At this point we have 76325410, but we want 76543210
tmp = _mm256_permutevar8x32_ps(tmp, permutation);
c += tmp*fact;
}
_mm256_storeu_ps(cl + l1, c);
} else {
// We're at the end of the ls, so we can't do blocks. This will
// be handled by the general case below, but before that we need
// to zero-initialize our values. We don't handle any ms in this case,
nm_block = 0;
for(int l = l1; l < l2; l++)
cl[l] = 0;
}
// General case
for(int m = nm_block; m < ainfo->nm; m++) {
for(int l = max(m,l1); l < l2; l++) {
ptrdiff_t i = ainfo->mvstart[m]*2 + l*2;
cl[l] += (1+(m>0))*(alm[i]*alm[i]+alm[i+1]*alm[i+1]);
}
}
}
// And normalize
for(int l = 0; l <= ainfo->lmax; l++)
cl[l] /= 2*l+1;
}
#endif
|
GB_binop__le_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__le_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__le_int16)
// A*D function (colscale): GB (_AxD__le_int16)
// D*A function (rowscale): GB (_DxB__le_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__le_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__le_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__le_int16)
// C=scalar+B GB (_bind1st__le_int16)
// C=scalar+B' GB (_bind1st_tran__le_int16)
// C=A+scalar GB (_bind2nd__le_int16)
// C=A'+scalar GB (_bind2nd_tran__le_int16)
// C type: bool
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LE || GxB_NO_INT16 || GxB_NO_LE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// Disabled for this operator: the dense-accum ewise3 kernel only exists for
// the ops listed below, and LE is not one of them.
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the work is done by the
// included template, specialized via the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating a sparse matrix into a dense matrix. The template is
// disabled (#if 0) for this operator, so the kernel is a successful no-op.
GrB_Info GB (_Cdense_accumB__le_int16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a scalar into a dense matrix. The template is
// disabled (#if 0) for this operator, so the kernel is a successful no-op.
GrB_Info GB (_Cdense_accumb__le_int16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale by a diagonal matrix D, via the included template.
GrB_Info GB (_AxD__le_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has type bool (the LE comparator's output), even though A and D are int16
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale by a diagonal matrix D, via the included template.
GrB_Info GB (_DxB__le_int16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C has type bool (the LE comparator's output), even though D and B are int16
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B, via the included template.
GrB_Info GB (_AaddB__le_int16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspace for slicing M, A, and B; released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are only read when is_eWiseUnion is true; otherwise they are
// presumably ignored by the template — confirm in GB_add_template.c
int16_t alpha_scalar ;
int16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
beta_scalar = (*((int16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with optional mask, C sparse/hypersparse.
// All of the work is done by the included meta template.
GrB_Info GB (_AemultB_08__le_int16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for LE (it has a flipped variant, GE),
// so only the unflipped branch below is compiled.
GrB_Info GB (_AemultB_02__le_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B, with M sparse/hyper and both A and B
// bitmap/full. All of the work is done by the included template.
GrB_Info GB (_AemultB_04__le_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B with optional (complemented) mask where C is bitmap.
// All of the work is done by the included template.
GrB_Info GB (_AemultB_bitmap__le_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Computes Cx [p] = (x <= Bx [p]) for all entries p present in B, where the
// scalar x is bound as the first argument of the LE operator.
GrB_Info GB (_bind1st__le_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B, or NULL if B is full
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    bool *Cx = (bool *) Cx_output ;
    const int16_t *Bx = (const int16_t *) Bx_input ;
    const int16_t x = *((const int16_t *) x_input) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in the bitmap of B are computed
        if (GBB (Bb, p))
        {
            int16_t bij = GBX (Bx, p, false) ;
            Cx [p] = (x <= bij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Computes Cx [p] = (Ax [p] <= y) for all entries p present in A, where the
// scalar y is bound as the second argument of the LE operator.
GrB_Info GB (_bind2nd__le_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A, or NULL if A is full
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs and output
    bool *Cx = (bool *) Cx_output ;
    const int16_t *Ax = (const int16_t *) Ax_input ;
    const int16_t y = *((const int16_t *) y_input) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap of A are computed
        if (GBB (Ab, p))
        {
            int16_t aij = GBX (Ax, p, false) ;
            Cx [p] = (aij <= y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int16_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (x <= aij) ;                  \
}
// C = (x <= A'), applying the transpose template with the scalar bound as
// the first operator argument via the GB_CAST_OP macro above.
GrB_Info GB (_bind1st_tran__le_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // x is the bound first argument of the LE operator
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE (the same type in this generated file)
    #undef GB_ATYPE
    #define GB_ATYPE \
        int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    int16_t aij = GBX (Ax, pA, false) ;     \
    Cx [pC] = (aij <= y) ;                  \
}
// C = (A' <= y), applying the transpose template with the scalar bound as
// the second operator argument via the GB_CAST_OP macro above.
GrB_Info GB (_bind2nd_tran__le_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // y is the bound second argument of the LE operator
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
calculate_octree_signed_distance_to_3d_skin_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Pooyan Dadvand
//
#if !defined(KRATOS_CALCULATE_DISTANCE_PROCESS_H_INCLUDED )
#define KRATOS_CALCULATE_DISTANCE_PROCESS_H_INCLUDED
// System includes
#include <fstream>
#include <iostream>
#include <string>
// External includes
// Project includes
#include "includes/define.h"
#include "processes/process.h"
#include "includes/model_part.h"
#include "spatial_containers/octree_binary.h"
#include "utilities/spatial_containers_configure.h"
#include "utilities/timer.h"
#include "utilities/math_utils.h"
#include "utilities/geometry_utilities.h"
namespace Kratos
{
/// Configuration ("traits") class that plugs the skin elements into the
/// Kratos octree / spatial-container machinery: it defines the point,
/// container and per-cell data types, plus the static predicates (bounding
/// box and intersection tests) the octree needs.
class DistanceSpatialContainersConfigure
{
public:
    /// Lightweight record stored at octree-cell corners: an id, the 3D
    /// coordinates of the corner and the distance value computed there.
    class CellNodeData
    {
        double mDistance;
        double mCoordinates[3];
        std::size_t mId;
    public:
        double& Distance(){return mDistance;}
        double& X() {return mCoordinates[0];}
        double& Y() {return mCoordinates[1];}
        double& Z() {return mCoordinates[2];}
        double& operator[](int i) {return mCoordinates[i];}
        std::size_t& Id(){return mId;}
    };
    ///@name Type Definitions
    ///@{
    enum { Dimension = 3,
           DIMENSION = 3,
           MAX_LEVEL = 12,  // deepest allowed octree refinement level
           MIN_LEVEL = 2    // coarsest allowed octree refinement level
    };
    typedef Point PointType; /// always the point 3D
    typedef std::vector<double>::iterator DistanceIteratorType;
    typedef ModelPart::ElementsContainerType::ContainerType ContainerType;
    typedef ContainerType::value_type PointerType;
    typedef ContainerType::iterator IteratorType;
    typedef ModelPart::ElementsContainerType::ContainerType ResultContainerType;
    typedef ResultContainerType::value_type ResultPointerType;
    typedef ResultContainerType::iterator ResultIteratorType;
    typedef Element::Pointer pointer_type;
    // per-cell payload: node slots (8 corners plus edge/face/center positions)
    typedef std::vector<CellNodeData*> data_type;
    typedef std::vector<PointerType>::iterator PointerTypeIterator;
    /// Pointer definition of DistanceSpatialContainersConfigure
    KRATOS_CLASS_POINTER_DEFINITION(DistanceSpatialContainersConfigure);
    ///@}
    ///@name Life Cycle
    ///@{
    /// Default constructor.
    DistanceSpatialContainersConfigure() {}
    /// Destructor.
    virtual ~DistanceSpatialContainersConfigure() {}
    ///@}
    ///@name Operators
    ///@{
    ///@}
    ///@name Operations
    ///@{
    /// Allocates a per-cell table of 27 (initially null) node pointers.
    /// Ownership: the caller must eventually hand the table to DeleteData().
    static data_type* AllocateData() {
        return new data_type(27, NULL);
    }
    /// Copies the node-pointer table from source into destination.
    /// Fix: the previous implementation assigned the pass-by-value pointer
    /// parameter itself ("destination = source;"), which had no effect for
    /// the caller; copying the pointed-to container is the intended behavior.
    /// Assumes both pointers are non-null (tables created by AllocateData).
    static void CopyData(data_type* source, data_type* destination) {
        *destination = *source;
    }
    /// Frees a table created by AllocateData().  The CellNodeData objects
    /// referenced by the table are owned elsewhere and are not freed here.
    static void DeleteData(data_type* data) {
        delete data;
    }
    /// Computes the axis-aligned bounding box of the element's geometry.
    static inline void CalculateBoundingBox(const PointerType& rObject, PointType& rLowPoint, PointType& rHighPoint)
    {
        rHighPoint = rObject->GetGeometry().GetPoint(0);
        rLowPoint = rObject->GetGeometry().GetPoint(0);
        for (unsigned int point = 0; point<rObject->GetGeometry().PointsNumber(); point++)
        {
            for(std::size_t i = 0; i<3; i++)
            {
                rLowPoint[i] = (rLowPoint[i] > rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rLowPoint[i];
                rHighPoint[i] = (rHighPoint[i] < rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rHighPoint[i];
            }
        }
    }
    /// Same as CalculateBoundingBox, but writing into raw double[3] arrays.
    static inline void GetBoundingBox(const PointerType rObject, double* rLowPoint, double* rHighPoint)
    {
        for(std::size_t i = 0; i<3; i++)
        {
            rLowPoint[i] = rObject->GetGeometry().GetPoint(0)[i];
            rHighPoint[i] = rObject->GetGeometry().GetPoint(0)[i];
        }
        for (unsigned int point = 0; point<rObject->GetGeometry().PointsNumber(); point++)
        {
            for(std::size_t i = 0; i<3; i++)
            {
                rLowPoint[i] = (rLowPoint[i] > rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rLowPoint[i];
                rHighPoint[i] = (rHighPoint[i] < rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rHighPoint[i];
            }
        }
    }
    /// True if the two element geometries intersect.
    static inline bool Intersection(const PointerType& rObj_1, const PointerType& rObj_2)
    {
        Element::GeometryType& geom_1 = rObj_1->GetGeometry();
        Element::GeometryType& geom_2 = rObj_2->GetGeometry();
        return geom_1.HasIntersection(geom_2);
    }
    /// True if the element geometry intersects the given axis-aligned box.
    static inline bool IntersectionBox(const PointerType& rObject, const PointType& rLowPoint, const PointType& rHighPoint)
    {
        return rObject->GetGeometry().HasIntersection(rLowPoint, rHighPoint);
    }
    /// Box/geometry intersection test with a tolerance applied to the box.
    static inline bool IsIntersected(const PointerType& rObject, const double& tolerance, const double rLowPoint[], const double rHighPoint[])
    {
        Kratos::Element::GeometryType& geom_1 = rObject->GetGeometry();
        Kratos::Point rLowPointTolerance;
        Kratos::Point rHighPointTolerance;
        for(std::size_t i = 0; i<3; i++)
        {
            // NOTE(review): by operator precedence this evaluates as
            // rLowPoint[i] + tolerance (the "* 1" is a no-op), which shifts
            // the box instead of scaling/expanding it.  If the intent was
            // "* (1 +/- tolerance)", this needs fixing — kept as-is here to
            // preserve current behavior; confirm against callers.
            rLowPointTolerance[i] = rLowPoint[i] * 1+tolerance;
            rHighPointTolerance[i] = rHighPoint[i] * 1+tolerance;
        }
        return geom_1.HasIntersection(rLowPointTolerance,rHighPointTolerance);
    }
    ///@}
    ///@name Access
    ///@{
    ///@}
    ///@name Inquiry
    ///@{
    ///@}
    ///@name Input and output
    ///@{
    /// Turn back information as a string.
    virtual std::string Info() const
    {
        return " Spatial Containers Configure";
    }
    /// Print information about this object.
    virtual void PrintInfo(std::ostream& rOStream) const {}
    /// Print object's data.
    virtual void PrintData(std::ostream& rOStream) const {}
    ///@}
protected:
private:
    /// Assignment operator.
    DistanceSpatialContainersConfigure& operator=(DistanceSpatialContainersConfigure const& rOther);
    /// Copy constructor.
    DistanceSpatialContainersConfigure(DistanceSpatialContainersConfigure const& rOther);
}; // Class DistanceSpatialContainersConfigure
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
class CalculateSignedDistanceTo3DSkinProcess
: public Process
{
public:
///@name Type Definitions
///@{
/// Pointer definition of CalculateSignedDistanceTo3DSkinProcess
KRATOS_CLASS_POINTER_DEFINITION(CalculateSignedDistanceTo3DSkinProcess);
typedef DistanceSpatialContainersConfigure ConfigurationType;
typedef OctreeBinaryCell<ConfigurationType> CellType;
typedef OctreeBinary<CellType> OctreeType;
///@}
///@name Life Cycle
///@{
/// Constructor.
/// @param rThisModelPart  model part used both as the skin model part and as
///        the body model part (mrSkinModelPart and mrBodyModelPart are bound
///        to the same object here — presumably intentional for this process;
///        TODO(review): confirm a separate body model part is not required.
CalculateSignedDistanceTo3DSkinProcess(ModelPart& rThisModelPart)
    : mrSkinModelPart(rThisModelPart), mrBodyModelPart(rThisModelPart)
{
}
/// Destructor.
virtual ~CalculateSignedDistanceTo3DSkinProcess()
{
}
///@}
///@name Operators
///@{
/// Invokes Execute(); lets the process object be used as a functor.
void operator()()
{
    Execute();
}
///@}
///@name Operations
///@{
/// Runs the whole process: builds the octree from the skin model part,
/// creates the cell corner nodes, computes the nodal distances (two passes),
/// and writes the octree mesh and results in GiD format to the files
/// "octree1.post.msh" / "octree1.post.res" in the current working directory.
virtual void Execute()
{
    KRATOS_TRY
    //std::cout << "Generating the Octree..." << std::endl;
    GenerateOctree();
    //std::cout << "Generating the Octree finished" << std::endl;
    GenerateCellNodalData();
    CalculateDistance();
    CalculateDistance2();
    // output files are created in the current working directory
    std::ofstream mesh_file1("octree1.post.msh");
    std::ofstream res_file("octree1.post.res");
    Timer::Start("Writing Gid conform Mesh");
    PrintGiDMesh(mesh_file1);
    PrintGiDResults(res_file);
    // mOctree.PrintGiDMeshNew(mesh_file1);
    Timer::Stop("Writing Gid conform Mesh");
    KRATOS_WATCH(mrBodyModelPart);
    KRATOS_CATCH("");
}
/// Builds the octree: inserts every skin node (refining the tree around the
/// skin), enforces the 2:1 level constraint between neighbouring cells, and
/// finally inserts the skin elements so each leaf records the geometry
/// objects crossing it.
void GenerateOctree()
{
    Timer::Start("Generating Octree");
    for(ModelPart::NodeIterator i_node = mrSkinModelPart.NodesBegin() ; i_node != mrSkinModelPart.NodesEnd() ; i_node++)
    {
        double temp_point[3];
        const Node<3>& r_node = *i_node;
        temp_point[0] = r_node[0];
        temp_point[1] = r_node[1];
        temp_point[2] = r_node[2];
        mOctree.Insert(temp_point);
    }
    // limit the refinement-level difference between neighbour cells to one
    mOctree.Constrain2To1();
    for(ModelPart::ElementIterator i_element = mrSkinModelPart.ElementsBegin() ; i_element != mrSkinModelPart.ElementsEnd() ; i_element++)
    {
        mOctree.Insert(*(i_element).base());
    }
    Timer::Stop("Generating Octree");
    // octree.Insert(*(mrSkinModelPart.ElementsBegin().base()));
    KRATOS_WATCH(mOctree);
}
/// Allocates the per-cell node-pointer tables for all leaves (in parallel),
/// then creates the corner nodes of every leaf.  Node creation is serial:
/// the running id counter and the sharing of nodes between neighbour cells
/// are not thread safe.
void GenerateCellNodalData()
{
    Timer::Start("Generating Cell Nodal Data");
    std::vector<OctreeType::cell_type*> all_leaves;
    mOctree.GetAllLeavesVector(all_leaves);
    #pragma omp parallel for
    for (std::size_t i = 0; i < all_leaves.size(); i++)
    {
        *(all_leaves[i]->pGetDataPointer()) = ConfigurationType::AllocateData();
    }
    // new octree-node ids continue after the existing body model part nodes
    std::size_t last_id = mrBodyModelPart.NumberOfNodes() + 1;
    for (std::size_t i = 0; i < all_leaves.size(); i++)
    {
        CellType* cell = all_leaves[i];
        GenerateCellNode(cell,last_id);
    }
    Timer::Stop("Generating Cell Nodal Data");
}
/// Creates the missing corner nodes of a leaf cell (positions 0-7; position
/// 8 would be the center) and shares each new node with the neighbour cells
/// touching the same corner.
/// @param pCell   leaf cell whose data table was already allocated
/// @param LastId  running node id; incremented for every node created
void GenerateCellNode(CellType* pCell, std::size_t& LastId)
{
    for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center
    {
        ConfigurationType::CellNodeData* p_node = (*(pCell->pGetData()))[i_pos];
        if(p_node == 0)
        {
            // key of this corner, used to locate its coordinates
            CellType::key_type keys[3];
            pCell->GetKey(i_pos,keys);
            // create the node once and reuse the pointer (the original code
            // re-evaluated (*(pCell->pGetData()))[i_pos] on every line and
            // declared an unused local double new_point[3])
            ConfigurationType::CellNodeData* p_new_node = new ConfigurationType::CellNodeData;
            p_new_node->Id() = LastId++;
            p_new_node->X() = pCell->GetCoordinate(keys[0]);
            p_new_node->Y() = pCell->GetCoordinate(keys[1]);
            p_new_node->Z() = pCell->GetCoordinate(keys[2]);
            (*(pCell->pGetData()))[i_pos] = p_new_node;
            mOctreeNodes.push_back(p_new_node);
            // propagate the node to all neighbour cells sharing this corner
            SetNodeInNeighbours(pCell,i_pos,p_new_node);
        }
    }
}
// void GenerateCellNode(CellType* pCell, std::size_t& LastId)
// {
// for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center
// {
// Node<3>* p_node = (*(pCell->pGetData()))[i_pos];
// if(p_node == 0)
// {
// CellType::key_type keys[3];
// pCell->GetKey(i_pos,keys);
//
// double new_point[3];
//
// new_point[0] = pCell->GetCoordinate(keys[0]);
// new_point[1] = pCell->GetCoordinate(keys[1]);
// new_point[2] = pCell->GetCoordinate(keys[2]);
//
//
// (*(pCell->pGetData()))[i_pos] = (mrBodyModelPart.CreateNewNode(++LastId, new_point[0], new_point[1], new_point[2])).get();
//
// SetNodeInNeighbours(pCell,i_pos,(*(pCell->pGetData()))[i_pos]);
// }
//
// }
// }
/// Shares a newly created corner node with every neighbour cell that touches
/// the same geometric position, so that each corner is represented by a
/// single CellNodeData object across cells.
/// @param pCell    cell that created the node
/// @param Position local corner position (0-7) inside pCell
/// @param pNode    the node to share
void SetNodeInNeighbours(CellType* pCell, int Position, ConfigurationType::CellNodeData* pNode)
{
    // key of the corner point inside this cell
    CellType::key_type point_key[3];
    pCell->GetKey(Position, point_key);
    // visit the (up to) 8 cells that can share this corner
    for (std::size_t i_direction = 0; i_direction < 8; i_direction++) {
        CellType::key_type neighbour_key[3];
        if (pCell->GetNeighbourKey(Position, i_direction, neighbour_key)) {
            CellType* neighbour_cell = mOctree.pGetCell(neighbour_key);
            // skip missing neighbours and the cell itself
            if (!neighbour_cell || (neighbour_cell == pCell))
                continue;
            std::size_t position = neighbour_cell->GetLocalPosition(point_key);
            if((*neighbour_cell->pGetData())[position])
            {
                // the neighbour already holds a node at this position; keep it
                //std::cout << "ERROR!! Bad Position calculated!!!!!!!!!!! position :" << position << std::endl;
                continue;
            }
            (*neighbour_cell->pGetData())[position] = pNode;
        }
    }
}
/// First distance pass: resets all nodal DISTANCE values of the body model
/// part to 1.0 (the maximum in the normalized space), seeds the distances of
/// the non-empty octree leaves, then ray-casts a distance for every node.
void CalculateDistance()
{
    Timer::Start("Calculate Distances");
    ModelPart::NodesContainerType::ContainerType& nodes = mrBodyModelPart.NodesArray();
    int nodes_size = nodes.size();
    // first of all we reset the node distance to 1.00 which is the maximum distance in our normalized space.
    #pragma omp parallel for firstprivate(nodes_size)
    for(int i = 0 ; i < nodes_size ; i++)
        nodes[i]->GetSolutionStepValue(DISTANCE) = 1.00;
    std::vector<CellType*> leaves;
    mOctree.GetAllLeavesVector(leaves);
    int leaves_size = leaves.size();
    // serial: neighbouring leaves may update shared corner nodes
    for(int i = 0 ; i < leaves_size ; i++)
        CalculateNotEmptyLeavesDistance(leaves[i]);
    #pragma omp parallel for firstprivate(nodes_size)
    for(int i = 0 ; i < nodes_size ; i++)
    {
        CalculateNodeDistance(*(nodes[i]));
    }
    Timer::Stop("Calculate Distances");
}
/// Second distance pass: resets the nodal distances, seeds the leaf corner
/// distances again, then visits the nodes per ray direction.  Note: the
/// direction loop only runs for i_direction == 0 and the called
/// CalculateDistance(node, direction) overload is currently a no-op (its
/// body is commented out), so this pass effectively repeats the reset/seed
/// work of CalculateDistance().
void CalculateDistance2()
{
    Timer::Start("Calculate Distances 2");
    ModelPart::NodesContainerType::ContainerType& nodes = mrBodyModelPart.NodesArray();
    int nodes_size = nodes.size();
    // first of all we reset the node distance to 1.00 which is the maximum distance in our normalized space.
    #pragma omp parallel for firstprivate(nodes_size)
    for(int i = 0 ; i < nodes_size ; i++)
        nodes[i]->GetSolutionStepValue(DISTANCE) = 1.00;
    std::vector<CellType*> leaves;
    mOctree.GetAllLeavesVector(leaves);
    int leaves_size = leaves.size();
    for(int i = 0 ; i < leaves_size ; i++)
        CalculateNotEmptyLeavesDistance(leaves[i]);
    for(int i_direction = 0 ; i_direction < 1 ; i_direction++)
    {
        //#pragma omp parallel for firstprivate(nodes_size)
        for(int i = 0 ; i < nodes_size ; i++)
        {
            // only nodes strictly inside the unit box are visited
            if(nodes[i]->X() < 1.00 && nodes[i]->Y() < 1.00 && nodes[i]->Z() < 1.00)
            // if((*nodes[i])[i_direction] == 0.00)
                CalculateDistance(*(nodes[i]), i_direction);
        }
    }
    Timer::Stop("Calculate Distances 2");
}
/// Intended to ray-cast the distance of a single node along one axis
/// direction.  The entire implementation is commented out, so this method is
/// currently a NO-OP; the dormant code is kept below for reference.
void CalculateDistance(Node<3>& rNode, int i_direction)
{
    // double coords[3] = {rNode.X(), rNode.Y(), rNode.Z()};
    // // KRATOS_WATCH_3(coords);
    //
    // //This function must color the positions in space defined by 'coords'.
    // //coords is of dimension (3) normalized in (0,1)^3 space
    //
    // typedef Element::GeometryType triangle_type;
    // typedef std::vector<std::pair<double, triangle_type*> > intersections_container_type;
    //
    // intersections_container_type intersections;
    // std::vector<Node<3>*> nodes_array;
    //
    //
    // const double epsilon = 1e-12;
    //
    // double distance = 1.0;
    //
    // // Creating the ray
    // double ray[3] = {coords[0], coords[1], coords[2]};
    // ray[i_direction] = 0; // starting from the lower extreme
    //
    // // KRATOS_WATCH_3(ray)
    // GetIntersectionsAndNodes(ray, i_direction, intersections, nodes_array);
    // // KRATOS_WATCH(nodes_array.size())
    // for (int i_node = 0; i_node < nodes_array.size() ; i_node++)
    // {
    //     double coord = nodes_array[i_node]->Coordinates()[i_direction];
    //     // KRATOS_WATCH(intersections.size());
    //
    //     int ray_color= 1;
    //     std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
    //     while (i_intersection != intersections.end()) {
    //         double d = coord - i_intersection->first;
    //         if (d > epsilon) {
    //
    //             ray_color = -ray_color;
    //             distance = d;
    //         } else if (d > -epsilon) {//interface
    //             distance = 0.00;
    //             break;
    //         } else {
    //             if(distance > -d)
    //                 distance = -d;
    //             break;
    //         }
    //
    //         i_intersection++;
    //     }
    //
    //     distance *= ray_color;
    //
    //     double& node_distance = nodes_array[i_node]->GetSolutionStepValue(DISTANCE);
    //     if(fabs(distance) < fabs(node_distance))
    //         node_distance = distance;
    //     else if (distance*node_distance < 0.00) // assigning the correct sign
    //         node_distance = -node_distance;
    //
    //
    // }
}
/// Seeds the distance of the 8 corner nodes of a leaf cell that contains
/// skin objects: each corner gets the distance to the closest triangle
/// stored in the cell, if smaller than the value already stored at the node.
/// @param pCell  leaf cell whose data table and object list are populated
void CalculateNotEmptyLeavesDistance(CellType* pCell)
{
    typedef OctreeType::cell_type::object_container_type object_container_type;
    object_container_type* objects = (pCell->pGetObjects());
    // There are no intersection in empty cells
    if (objects->empty())
        return;
    for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center
    {
        // Fix: the corner coordinates were previously left uninitialized
        // (the assignments below were commented out), so the distance was
        // computed from indeterminate values (undefined behavior).  The key
        // and coordinates are also loop-invariant w.r.t. the objects, so
        // compute them once per corner instead of once per object.
        CellType::key_type keys[3];
        pCell->GetKey(i_pos,keys);
        double cell_point[3];
        cell_point[0] = pCell->GetCoordinate(keys[0]);
        cell_point[1] = pCell->GetCoordinate(keys[1]);
        cell_point[2] = pCell->GetCoordinate(keys[2]);
        double distance = 1.00; // maximum distance is 1.00
        for(object_container_type::iterator i_object = objects->begin(); i_object != objects->end(); i_object++)
        {
            // distance from this corner to the object's triangle
            double d = GeometryUtils::PointDistanceToTriangle3D((*i_object)->GetGeometry()[0], (*i_object)->GetGeometry()[1], (*i_object)->GetGeometry()[2], Point(cell_point[0], cell_point[1], cell_point[2]));
            if(d < distance)
                distance = d;
        }
        // keep the minimum over all cells sharing this corner node
        double& node_distance = (*(pCell->pGetData()))[i_pos]->Distance();
        if(distance < node_distance)
            node_distance = distance;
    }
}
/// Updates the DISTANCE solution-step value of a single node with the
/// ray-cast distance of its position: keeps whichever value has the smaller
/// magnitude, and otherwise propagates the freshly computed sign.
/// @param rNode  node of the body model part to update
void CalculateNodeDistance(Node<3>& rNode)
{
    double coord[3] = {rNode.X(), rNode.Y(), rNode.Z()};
    double distance = DistancePositionInSpace(coord);
    double& node_distance = rNode.GetSolutionStepValue(DISTANCE);
    // (removed an unused local `const double epsilon = 1.00e-12;`)
    if(fabs(node_distance) > fabs(distance))
        node_distance = distance;
    else if (distance*node_distance < 0.00) // assigning the correct sign
        node_distance = -node_distance;
}
/// Computes the signed distance of the position 'coords' (normalized in the
/// (0,1)^3 space) by casting one axis-aligned ray per direction, flipping
/// the sign at every skin crossing, and returning the directional distance
/// of smallest magnitude.
/// @param coords  pointer to 3 doubles: the query position
/// @return signed distance (sign given by the crossing parity of the ray)
double DistancePositionInSpace(double* coords)
{
    typedef Element::GeometryType triangle_type;
    typedef std::vector<std::pair<double, triangle_type*> > intersections_container_type;
    // NOTE(review): GetIntersections() is currently a stub, so this vector
    // always stays empty; if it is re-enabled, it must clear the vector per
    // direction (its dormant implementation does so).
    intersections_container_type intersections;
    const int dimension = 3;
    const double epsilon = 1e-12;
    // per-direction signed distance; 1.0 is the maximum in normalized space
    double distances[3] = {1.0, 1.0, 1.0};
    for (int i_direction = 0; i_direction < dimension; i_direction++)
    {
        // Creating the ray: start at the lower extreme of this direction
        double ray[3] = {coords[0], coords[1], coords[2]};
        ray[i_direction] = 0;
        GetIntersections(ray, i_direction, intersections);
        // the "color" flips at every crossing behind the point: +1 outside, -1 inside
        int ray_color= 1;
        std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
        while (i_intersection != intersections.end()) {
            double d = coords[i_direction] - i_intersection->first;
            if (d > epsilon) {
                // crossing behind the point: flip parity, remember distance
                ray_color = -ray_color;
                distances[i_direction] = d;
            } else if (d > -epsilon) {//interface
                distances[i_direction] = 0.00;
                break;
            } else {
                // first crossing in front of the point
                if(distances[i_direction] > -d)
                    distances[i_direction] = -d;
                break;
            }
            i_intersection++;
        }
        distances[i_direction] *= ray_color;
    }
#ifdef _DEBUG
    // Fix: this block referenced an undefined identifier `colors` and did
    // not compile in debug builds; report the computed distances instead.
    std::cout << "distances : " << distances[0] << ", " << distances[1] << ", " << distances[2] << std::endl;
#endif
    // pick the component with the smallest magnitude
    double distance = (fabs(distances[0]) > fabs(distances[1])) ? distances[1] : distances[0];
    distance = (fabs(distance) > fabs(distances[2])) ? distances[2] : distance;
    return distance;
}
/// Intended to walk an axis-aligned ray through the octree, collecting both
/// the ray/skin intersection points and the octree nodes lying on the ray.
/// The entire implementation is commented out, so this method is currently a
/// NO-OP (neither output container is touched); the dormant code is kept
/// below for reference.
void GetIntersectionsAndNodes(double* ray, int direction, std::vector<std::pair<double,Element::GeometryType*> >& intersections, std::vector<Node<3>*>& rNodesArray)
{
    // //This function passes the ray through the model and gives the hit point to all objects in its way
    // //ray is of dimension (3) normalized in (0,1)^3 space
    // // direction can be 0,1,2 which are x,y and z respectively
    //
    // const double epsilon = 1.00e-12;
    //
    // // first clearing the intersections points vector
    // intersections.clear();
    //
    // OctreeType* octree = &mOctree;
    //
    // OctreeType::key_type ray_key[3] = {octree->Key(ray[0]), octree->Key(ray[1]), octree->Key(ray[2])}; //ASK_TOKEN
    // OctreeType::key_type cell_key[3];
    //
    // // getting the entrance cell from lower extreme
    // ray_key[direction] = 0;
    // OctreeType::cell_type* cell = octree->pGetCell(ray_key);
    //
    // while (cell) {
    //     std::size_t position = cell->GetLocalPosition(ray_key); // Is this the local position!?!?!?!
    //     OctreeType::key_type node_key[3];
    //     cell->GetKey(position, node_key);
    //     if((node_key[0] == ray_key[0]) && (node_key[1] == ray_key[1]) && (node_key[2] == ray_key[2]))
    //     {
    //         if(cell->pGetData())
    //         {
    //             if(cell->pGetData()->size() > position)
    //             {
    //                 Node<3>* p_node = (*cell->pGetData())[position];
    //                 if(p_node)
    //                 {
    //                     //KRATOS_WATCH(p_node->Id())
    //                     rNodesArray.push_back(p_node);
    //                 }
    //             }
    //             else
    //                 KRATOS_WATCH(cell->pGetData()->size())
    //         }
    //     }
    //
    //
    //     // std::cout << ".";
    //     GetCellIntersections(cell, ray, ray_key, direction, intersections);
    //
    //     // Add the cell's middle node if existed
    //     // cell->GetKey(8, cell_key); // 8 is the central position
    //     // ray_key[direction]=cell_key[direction]; // positioning the ray in the middle of cell in its direction
    //
    //     // position = cell->GetLocalPosition(ray_key);
    //     // if(position < 27) // principal nodes
    //     // {
    //     //     if(cell->pGetData())
    //     //     {
    //     //         if(cell->pGetData()->size() > position)
    //     //         {
    //     //             Node<3>* p_node = (*cell->pGetData())[position];
    //     //             if(p_node)
    //     //             {
    //     //                 //KRATOS_WATCH(p_node->Id())
    //     //                 rNodesArray.push_back(p_node);
    //     //             }
    //     //         }
    //     //         else
    //     //             KRATOS_WATCH(cell->pGetData()->size())
    //     //     }
    //     // }
    //     // else
    //     // {
    //     //     KRATOS_WATCH(position);
    //     //     KRATOS_WATCH(*cell);
    //     // }
    //
    //
    //     // go to the next cell
    //     if (cell->GetNeighbourKey(1 + direction * 2, cell_key)) {
    //         ray_key[direction] = cell_key[direction];
    //         cell = octree->pGetCell(ray_key);
    //         ray_key[direction] -= 1 ;//the key returned by GetNeighbourKey is inside the cell (minkey +1), to ensure that the corresponding
    //         //cell get in pGetCell is the right one.
    //         #ifdef _DEBUG
    //         Octree_Pooyan::key_type min_key[3];
    //         cell->GetMinKey(min_key[0],min_key[1],min_key[2]);
    //         Octree_Pooyan::key_type tmp;
    //         tmp= min_key[direction];
    //         assert(ray_key[direction]==tmp);
    //         #endif
    //     } else
    //         cell = NULL;
    // }
    //
    //
    //
    // // KRATOS_WATCH(rNodesArray.size());
    // // now eliminating the repeated objects
    // if (!intersections.empty()) {
    //     //sort
    //     std::sort(intersections.begin(), intersections.end());
    //     // unique
    //     std::vector<std::pair<double, Element::GeometryType*> >::iterator i_begin = intersections.begin();
    //     std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
    //     while (++i_begin != intersections.end()) {
    //         // considering the very near points as the same points
    //         if (fabs(i_begin->first - i_intersection->first) > epsilon) // if the hit points are far enough they are not the same
    //             *(++i_intersection) = *i_begin;
    //     }
    //     intersections.resize((++i_intersection) - intersections.begin());
    //
    // }
}
/// Intended to walk an axis-aligned ray through the octree and collect the
/// ray/skin intersection points, sorted and de-duplicated.  The entire
/// implementation is commented out, so this method is currently a NO-OP
/// (`intersections` is not even cleared); the dormant code is kept below.
void GetIntersections(double* ray, int direction, std::vector<std::pair<double,Element::GeometryType*> >& intersections)
{
    // //This function passes the ray through the model and gives the hit point to all objects in its way
    // //ray is of dimension (3) normalized in (0,1)^3 space
    // // direction can be 0,1,2 which are x,y and z respectively
    //
    // const double epsilon = 1.00e-12;
    //
    // // first clearing the intersections points vector
    // intersections.clear();
    //
    // OctreeType* octree = &mOctree;
    //
    // OctreeType::key_type ray_key[3] = {octree->Key(ray[0]), octree->Key(ray[1]), octree->Key(ray[2])};
    // OctreeType::key_type cell_key[3];
    //
    // // getting the entrance cell from lower extreme
    // OctreeType::cell_type* cell = octree->pGetCell(ray_key);
    //
    // while (cell) {
    //     // std::cout << ".";
    //     GetCellIntersections(cell, ray, ray_key, direction, intersections);
    //     // go to the next cell
    //     if (cell->GetNeighbourKey(1 + direction * 2, cell_key)) {
    //         ray_key[direction] = cell_key[direction];
    //         cell = octree->pGetCell(ray_key);
    //         ray_key[direction] -= 1 ;//the key returned by GetNeighbourKey is inside the cell (minkey +1), to ensure that the corresponding
    //         //cell get in pGetCell is the right one.
    //         #ifdef _DEBUG
    //         Octree_Pooyan::key_type min_key[3];
    //         cell->GetMinKey(min_key[0],min_key[1],min_key[2]);
    //         Octree_Pooyan::key_type tmp;
    //         tmp= min_key[direction];
    //         assert(ray_key[direction]==tmp);
    //         #endif
    //     } else
    //         cell = NULL;
    // }
    //
    //
    // // now eliminating the repeated objects
    // if (!intersections.empty()) {
    //     //sort
    //     std::sort(intersections.begin(), intersections.end());
    //     // unique
    //     std::vector<std::pair<double, Element::GeometryType*> >::iterator i_begin = intersections.begin();
    //     std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin();
    //     while (++i_begin != intersections.end()) {
    //         // considering the very near points as the same points
    //         if (fabs(i_begin->first - i_intersection->first) > epsilon) // if the hit points are far enough they are not the same
    //             *(++i_intersection) = *i_begin;
    //     }
    //     intersections.resize((++i_intersection) - intersections.begin());
    //
    // }
}
/// Intended to intersect the in-cell segment of an axis-aligned ray with the
/// skin triangles stored in one octree cell.  The implementation is
/// commented out, so this method currently collects nothing; the dormant
/// code is kept below for reference.
/// @return always 0 (number-of-intersections style status of the dormant code)
int GetCellIntersections(OctreeType::cell_type* cell, double* ray,
                         OctreeType::key_type* ray_key, int direction,
                         std::vector<std::pair<double, Element::GeometryType*> >& intersections) {
    // //This function passes the ray through the cell and gives the hit point to all objects in its way
    // //ray is of dimension (3) normalized in (0,1)^3 space
    // // direction can be 0,1,2 which are x,y and z respectively
    //
    // typedef Element::GeometryType triangle_type;
    // typedef OctreeType::cell_type::object_container_type object_container_type;
    //
    // object_container_type* objects = (cell->pGetObjects());
    //
    // // There are no intersection in empty cells
    // if (objects->empty())
    //     return 0;
    //
    // // std::cout << "X";
    // // calculating the two extreme of the ray segment inside the cell
    // double ray_point1[3] = {ray[0], ray[1], ray[2]};
    // double ray_point2[3] = {ray[0], ray[1], ray[2]};
    // ray_point1[direction] = cell->GetCoordinate(ray_key[direction]);
    // ray_point2[direction] = ray_point1[direction] + cell->GetSize();
    //
    // for (object_container_type::iterator i_object = objects->begin(); i_object != objects->end(); i_object++) {
    //     double intersection[3]={0.00,0.00,0.00};
    //
    //     int is_intersected = IntersectionTriangleSegment((*i_object)->GetGeometry(), ray_point1, ray_point2, intersection); // This intersection has to be optimized for axis aligned rays
    //
    //     if (is_intersected == 1) // There is an intersection but not coplanar
    //         intersections.push_back(std::pair<double, Element::GeometryType*>(intersection[direction], &((*i_object)->GetGeometry())));
    //     //else if(is_intersected == 2) // coplanar case
    // }
    //
    // return 0;
    // Fix: the function is declared to return int but had no return
    // statement at all once the body was commented out — flowing off the end
    // of a value-returning function is undefined behavior if the result is
    // used.  Return 0 ("no intersections") explicitly.
    return 0;
}
/// Ray/triangle intersection test, adapted from the softsurfer algorithm
/// referenced below.
/// @param rGeometry         triangle geometry (uses its first three points)
/// @param RayPoint1         ray origin (array of 3 doubles)
/// @param RayPoint2         second point defining the ray direction
/// @param IntersectionPoint output: intersection of the ray with the triangle plane
/// @return -1 degenerate triangle, 0 no intersection, 1 intersection inside
///         the triangle, 2 ray lies in the triangle plane
int IntersectionTriangleSegment(Element::GeometryType& rGeometry, double* RayPoint1, double* RayPoint2, double* IntersectionPoint)
{
    // This is the adaption of the implementation provided in:
    // http://www.softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm#intersect_RayTriangle()
    const double epsilon = 1.00e-12;
    array_1d<double,3> u, v, n; // triangle vectors
    array_1d<double,3> dir, w0, w; // ray vectors
    double r, a, b; // params to calc ray-plane intersect
    // get triangle edge vectors and plane normal
    u = rGeometry[1] - rGeometry[0];
    v = rGeometry[2] - rGeometry[0];
    MathUtils<double>::CrossProduct(n, u, v); // cross product
    if (norm_2(n) == 0) // triangle is degenerate
        return -1; // do not deal with this case
    for(int i = 0 ; i < 3 ; i++)
    {
        dir[i] = RayPoint2[i] - RayPoint1[i]; // ray direction vector
        w0[i] = RayPoint1[i] - rGeometry[0][i];
    }
    a = -inner_prod(n,w0);
    b = inner_prod(n,dir);
    if (fabs(b) < epsilon) { // ray is parallel to triangle plane
        if (a == 0) // ray lies in triangle plane
            return 2;
        else return 0; // ray disjoint from plane
    }
    // get intersect point of ray with triangle plane
    r = a / b;
    if (r < 0.0) // ray goes away from triangle
        return 0; // => no intersect
    // for a segment, also test if (r > 1.0) => no intersect
    for(int i = 0 ; i < 3 ; i++)
        IntersectionPoint[i] = RayPoint1[i] + r * dir[i]; // intersect point of ray and plane
    // is I inside T?  (parametric coordinates s,t in the u,v basis)
    double uu, uv, vv, wu, wv, D;
    uu = inner_prod(u,u);
    uv = inner_prod(u,v);
    vv = inner_prod(v,v);
    for(int i = 0 ; i < 3 ; i++)
        w[i] = IntersectionPoint[i] - rGeometry[0][i];
    wu = inner_prod(w,u);
    wv = inner_prod(w,v);
    D = uv * uv - uu * vv;
    // get and test parametric coords
    double s, t;
    s = (uv * wv - vv * wu) / D;
    if (s < 0.0 - epsilon || s > 1.0 + epsilon) // I is outside T
        return 0;
    t = (uv * wu - uu * wv) / D;
    if (t < 0.0 - epsilon || (s + t) > 1.0 + epsilon) // I is outside T
        return 0;
    return 1; // I is in T
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
    return "CalculateSignedDistanceTo3DSkinProcess";
}
/// Print information about this object (the process name).
virtual void PrintInfo(std::ostream& rOStream) const
{
    rOStream << "CalculateSignedDistanceTo3DSkinProcess";
}
/// Print object's data (intentionally empty: the process carries no
/// printable state beyond its model part references).
virtual void PrintData(std::ostream& rOStream) const
{
}
void PrintGiDMesh(std::ostream & rOStream) const {
std::vector<CellType*> leaves;
mOctree.GetAllLeavesVector(leaves);
std::cout << "writing " << leaves.size() << " leaves" << std::endl;
rOStream << "MESH \"leaves\" dimension 3 ElemType Hexahedra Nnode 8" << std::endl;
rOStream << "# color 96 96 96" << std::endl;
rOStream << "Coordinates" << std::endl;
rOStream << "# node_number coordinate_x coordinate_y coordinate_z " << std::endl;
for(DistanceSpatialContainersConfigure::data_type::const_iterator i_node = mOctreeNodes.begin() ; i_node != mOctreeNodes.end() ; i_node++)
{
rOStream << (*i_node)->Id() << " " << (*i_node)->X() << " " << (*i_node)->Y() << " " << (*i_node)->Z() << std::endl;
}
std::cout << "Nodes written..." << std::endl;
rOStream << "end coordinates" << std::endl;
rOStream << "Elements" << std::endl;
rOStream << "# element n1 n2 n3 n4 n5 n6 n7 n8" << std::endl;
for (std::size_t i = 0; i < leaves.size(); i++)
{
if ((leaves[i]->pGetData()))
{
DistanceSpatialContainersConfigure::data_type& nodes = (*(leaves[i]->pGetData()));
// std::cout << "Leave - Level: " << nodes[0]->Id() << " " << nodes[1]->Id() << " " << nodes[2]->Id() << " etc... " << std::endl;
rOStream << i + 1;
for(int j = 0 ; j < 8 ; j++)
rOStream << " " << nodes[j]->Id();
rOStream << std::endl;
}
}
rOStream << "end elements" << std::endl;
}
/// Writes the nodal DISTANCE values of the body model part in the GiD
/// post-process result format.
/// Fix: removed the unused `leaves` vector that was filled from the octree
/// on every call (wasted work; the leaves were never read here).
void PrintGiDResults(std::ostream & rOStream) const {
    rOStream << "GiD Post Results File 1.0" << std::endl << std::endl;
    rOStream << "Result \"Distance\" \"Kratos\" 1 Scalar OnNodes" << std::endl;
    rOStream << "Values" << std::endl;
    for(ModelPart::NodeIterator i_node = mrBodyModelPart.NodesBegin() ; i_node != mrBodyModelPart.NodesEnd() ; i_node++)
    {
        rOStream << i_node->Id() << " " << i_node->GetSolutionStepValue(DISTANCE) << std::endl;
    }
    rOStream << "End Values" << std::endl;
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
ModelPart& mrSkinModelPart;
ModelPart& mrBodyModelPart;
DistanceSpatialContainersConfigure::data_type mOctreeNodes;
OctreeType mOctree;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
CalculateSignedDistanceTo3DSkinProcess& operator=(CalculateSignedDistanceTo3DSkinProcess const& rOther);
/// Copy constructor.
//CalculateSignedDistanceTo3DSkinProcess(CalculateSignedDistanceTo3DSkinProcess const& rOther);
///@}
}; // Class CalculateSignedDistanceTo3DSkinProcess
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
inline std::istream& operator >> (std::istream& rIStream,
CalculateSignedDistanceTo3DSkinProcess& rThis);
/// output stream function
/// Inserts the process' info and data into an output stream.
inline std::ostream& operator << (std::ostream& rOStream,
                                  const CalculateSignedDistanceTo3DSkinProcess& rThis)
{
    // Standard Kratos printing convention: info line, blank line, then data.
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);
    return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_CALCULATE_DISTANCE_PROCESS_H_INCLUDED defined
|
pem_fmt_plug.c | /* PEM (PKCS #8) cracker.
*
* This software is Copyright (c) 2015, Dhiru Kholia <kholia at kth.se>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* This code may be freely used and modified for any purpose.
*
* Big thanks to Martin Kleppmann, and Lapo Luchini for making this format
* possible.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pem;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pem);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "pbkdf2_hmac_sha1.h"
#include "jumbo.h"
#include "memdbg.h"
#include "asn1.h"
#define FORMAT_LABEL "PEM"
#define FORMAT_NAME "PKCS#8 private key (RSA/DSA/ECDSA)"
#define FORMAT_TAG "$PEM$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 3DES " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 3DES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(*fctx)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define SALTLEN 8 // XXX
#define IVLEN 8 // XXX
#define CTLEN 4096 // XXX
// $PEM$type$cipher$$salt$iterations$iv$blob_length$blob // type, and cipher should be enough for all possible combinations
static struct fmt_tests PEM_tests[] = {
/* https://github.com/bwall/pemcracker/blob/master/test.pem */
{FORMAT_TAG "1$1$0c71e1c801194282$2048$87120f8c098437d0$640$c4bc6bc5447bed58e6f945cd1fde56d52aa794bd64b3c509fead910b1e7c1be9b6a89666c572c8ba5e469c5732ff105ecb97875efc2490ea9659082bdf0f3a2fd263ceb86bde4807bb85b01ca25efe25655fcdb9d54db94f7f48bb9b04af2bad5c2aaed87dc289da8b3fb891a9797f73adacb05b21717fe11d4ebdf8f1d39ecfcb8791447263572f487b087484c02e40a13a89c0613ebc4958a0853781eb1382c1f9ac1f1f97dd06e1a26d98088c5f262680a33dbf2e7742a436847cead5af15c538a5eb21b99e0c4ca30d08f5e805678bdbae6a3ee53623b7cebaeac6c7dd54834f6806112909d2d74da35ea907d35cfbd9cfcca4c302e9dc19b3017957747b4525e7832067d462f15451ca47be771add080da835dc177c26df3dd3fbf4b44d0ac7aea30a44469fe542abaf9bb2787b5694c7fdc1b9765167bf9ea5bf695e927bb98217491d7f1843a1e39e2a1a6a178b03e391031a80943f08b6dd7fa5104e38c4a4bad773775a71f5d076641a52a1100e701c14e5a15ac3dbaefad5f2ceb3ccded6689aef2bc9060c36599580f34324ecfa8cf2628da6475934bb53a8f7ef4a07fc1d0a4d41bfc9bac91859ce98f3f8876bbfba01bbe4491fa2d511b2f6eb5ae7ad4213a24c21fef676c8713de9c674d7a88d8844765732dbab43ee9faa5245ddb6fd6d66ab301b82dca829aeacaa5f178cd12aa0565c8a2d6c00b8c7f611ceedeee8ea68d99582fe9ba701c46d1ea78c88bb942ee3e30e83d9843cbda720bd2dcc07f2b4497e781cd54156e5e1022c4fb5827ab4e0469bb40500a6978eed0f27e0258e7304b0745e90eb36bb8d8e1c15c458313c547c3bfe54d75ac26405c40cfa0fecbf2a95e312870c08b13e6b36494c09c8a8ef12057e090a24e05fd3", "komodia"},
// openssl pkcs8 -in test-rsa.pem -topk8 -v2 des3 -iter 2049 -out new.pem
{"$PEM$1$1$671f19f01d9d0275$2049$50524fb9fd8b147d$1224$cae9d4d53583f50d4c468eca9061458ff1316732d6f28a70f0a1740021f594c8738ca58bfa0e4eb97a826776c3dce6ab89dd71ad30bf7630ec2f1fb18d895954f42a61ce2529e26b7d868267c44b21c03fac11387ce1d5e5b88a75f2038737820ccc768c72e0cdd3d78ba912fa6255eb4e3738cdae60109be2450d053aa91fb62a312263f484eae6f1fb757cf7d92e63f066498e4ed809e5318143f48afde4398a695bbe6804148b319c4f54633f91a08fdcc373a4a66b6f14a2b659e149a25053ff5bc0035b58aa462c8558ab3aefdc2770bad36b5fde810d6fbf07c29ea8e3a72fbfaa1b977663f8b61129b50658866d4a39bb4e9da24b4ef226170a3d9ded7f99a4e6265ca65ba94078da5f2ade1567bc93812205e8ff085cb07479af22e261d1255e29b02aca3278ac29232a49d2656b217f4822d72c7dcd24d2fde44aab525f2bcf970627597b26cc540c9cf8112002fdb35c2fbf97d7532648fa2c3b0508d974b35713a1ff81ff44f8414867e4d8f6b4027ecfd47fd4992b3a3e6e29b43c6ae76c2d503bb5bb260655960b659e55af66254bbfb248a247df3294518fab8295640c4f317ab25adf345f8693dd89514472938da1801d405c3b75419d39d6fe9a554d798da2b26566eef4f7e360dfb2802f68f33d6d4fb756d2e2140f5fef476048fdd923371d6dd494b3aed07fd7733d03783921296ec39ab392ff13bfed5e2c52c1642d999c57635230a4fb7673c5a003bd6b407179a49b2967dd39b1668c35ed75f4517c08d8ee21186a15076fe289733eb4a9a6b90bc61c4ace009ffa40e7319e54006214297f2f219b9fc3c6931fac9568d2d5e457954a6a4999959cbee476e6142b5cc6a373fe7504fe41ac09b5d4f6af9e02357076556f787dc47c6ab9783fea53d1c87c65718a554c5fff242c15118c90f6f6a61e8a0427b98f5244b0f43138493393834f8991da9411b53e394615ebb3303018a905b41baa4be084b0c9008d257018add9278a676d53d812b6c494ebaff36509c9e82626a1c81ecba85ccd569fbebd7d6d546b45439315dc2a37fdffcb356e79122211ad295a2819b9ac30aa7344bc26b2bd618c15d6bd52c90741ef8c3baba7e54daee004c3ecadcda4fc2e63c769a98a540e12b1c37bb47935a5bbd82762e3be995244a766755c3007477b22392998694de7be8f695048870d78d4e57cc222cfae9251bc21ad4f6b3303473b0da554464862a24da4334701389730eae91b70c5ecdad201e7174ef7ec09928a84f4f64d5b8e7398bad1d25a4a9b17e0f58da58377ec796273f5bc48cdda81e9cf02434ee06f10f8330b54e0f102fd79105c2a4d85e4c5d275fe47107bd76d66b88b59489d7ca
36c2e8a104426c6f34c48425ea33d610655178b13af409ff807cc196e48d4036e3d01e485ee0420f6ffbadfb142fd08459b0ff1c1c2d424aaa553bb73a90c19fa454b6f4ee9732f13d666b8fb8a86fe08b394ce94a0d68d091dfd124e386d19882782afaa9b97ce626123962e784c41398499ec1b8848be2b2c62597dfaf91d7e4cfef0a5b8bd4d9afa5824c3bb595029deb8b67c55d9eb976215a10e1846b1b82f0e1ad6968fbe2b98b3f50e0ec641dcbee8ed4c078ba09b2fea93800172fc0ae64f9ad510d59925f50a214168b431f1e88a26e77c4d507503f483bb1955b4cbc4571111dbbf1c78a1e4915ffba4be4fafcb22410032d86df1aa7e", "password"},
// openssl pkcs8 -in test-rsa.pem -topk8 -v2 des3 -iter 2047 -out new.pem
{"$PEM$1$1$029375ebb44d8c3f$2047$3c7dbbee4df5863e$1224$b97ff356c7687abcd4ea17527b45eaf84d69ac127ddc4b05383331a56e9a0c26661735f9fc5298fcef7fe4280ccafed909ef8494e8dcdc75ebf23daeb3eb28ce5e1e6181c050e6c9416b41176eb87a12ec6898b90a75b0deece18eb7d4c13372eedf1060ceac9230d77843a21dbfa24edd1e41d6aada961f205295198bec11e2d87ae5d2d07daf1b5f5a21455d68003ba40291c20b91114d9339b69a4564c749b64668b209f8a7cef029c8d7f6369c17ddc6bee527576c3da794aeb6125ce9f7d41fc8d5690fc2447a594721390d7803bc600e2c67801636072047f51ca1a9fff2d629e987aa18aa4b08d0f7dce547132d8073718ab2b1fb9ce7ce46551e82749f72ef228b6e8e4420395efb3e90ebe9cc15719f3a0afd71f387a2d432783804efdccf2b554fa4d60c1a5ff385ed784f1cb4b8fe013a08c08e1f9457897457f7e342a5071e471ad261708fd0cb9c75040a85ed27ac1079379557c4dcb74384701f6e30514e80788a543adb036135d94cbdf1feef5c0d287cc081fe75eddb29e37b462c4077bf07da74bb16ee96df3d7f1bcf616198e11d4c489eb33712b29e26c0d32df878074d7e145684cfec9d4f26e53d1cb10d45b13b55195ae9f6afa5c93b67e423558aa73cc4c6d83bb6ff80559076201b352e60f3bc0f018f79e6282fa6ce322f51703860f2da59606d8ab3433ced6359f3dee0d5b046929f1068903460cb84c5c2b2e2c478cc8547d88227aec9b2cf099d3a897778410a0e43138dc30f30768d3e90b675265f725e6b9cd7ca4a7db912c3e67ab2d680e8bf7e3f1ef9b9815b15873ee6a739972117dc4736cfe256be12b70ca1661cb9d32d69a396de5a0ceb81de37c1c395146f479b6e2b24017ee487b68e0b77bb4890533a50275caa68ffdc54cff2652fe94956d0b2c0463104a1b8e04f01f0c125e1125ce598a75d61152eabf97a58e6e789f60e240958b7e75ac208e48465150f389b9a5ff7ae5636cc29e72a573e8faf0ee80bd1a2a2e846a897019d75cad79b16a59be3da46a823baf9a04104d2d009e2780d21c3439c7e791f3ec63a296fbf4dc15e955e00e1be652cc70e930a08db2797694aeec3c20722b65e0cbaa8e3b753b3a51f3b16f32fbe55876f48615937e4ce9da7d985c8772923fce3cd6c463b422ce61fdfff8ba28df7a3cdc7253ad4ce0a35218962a45edc5dd3e24a2248e407d6106dab81cea41b453ac509c4f0ec03d220ff84c842755f4f8673c0975cac13f84f7176cc9c4cd27eb74b42065ea9a4853ef0d2940596f112f3c766db0b6c7e5d5d91bb0aad5e44e34abbc871dbfdb7824e014fa7d2ae62bd253f422482538c4c35dcb7f4a20c915b698262737df04bf7e
6806d5bbfff7c54d6ba4c5892dcd122bc0fe80c7399228029cc4c29f388d9787c46d609abb2554a010984db73e8605272a1bd7570aca1ccc04edee3d704b7387bd9866423a015a88e4efced478c00210e213c3d2b2bebdf1584d9a8fb2a31397a12a2d07ecf6247c70d2950f0db3f64aad13647e7db47ca51d7c95f50fc016d9731c992f2463f794ea915b7b5307db6de25fbd3ba7a7b4b15f7a011ab399a2b8c73cd5a7a1b00743928499effb5ab1a402e8600c52f8d1204d8923c2d8e41cdd941d591b554f79dfee3c3eb33a427ab360f90a8820c2957e2b5afd06ea3f02df3563eec9a06f64a6e019e33ed0a112d53382d071cbf835907094158", "alonglongpassword"},
{"$PEM$1$1$74ae53fd1cf3e5e8$2048$33c1919f1cd1e8b8$336$6e59f6d3fbb084e224da89d23bfe0aec18f1491f58e334119ad83edd10d81b636622736e8a712a34959d78da79af603ec33d1a57bfaef2081e0ff8eccab31a0ad9cc18a60c20c1a2e15790c89972c5abb642a76ddeadf6fe8423c1b1737286a177b931352c5c78d105f828e9dc30fba659147f920aeaabb006988a020845faa985b948de42cc46b23406fffd2f05756c9e13e2fbc049c4be4736f9ec770c8da288a908e8abbbe1fe5c75cc65b7721d4eb338e67fe1bba937830cb9e857f3236a2894059bead0266e6ff78c7a52cab687b5e256bf1393674cdd857062d860434c530647d21edaa7f79b0e134de5cd536117ee5cbc49065c6142b30c1d3e5b0de8c55dd2748ba8bb5915498d5ed3c4abaedba13f4b10a8ff10d3383bce98dd3d52a6393ff1e791d9410bc90b34e115ed7ce10cdc75e6df29c31714983af39f1513395ef89cf2d57f68fc134996ef1afa0b", "dsa"},
{"$PEM$1$1$cbb6cdcfc1b27cc8$2048$9b9e633ba83d48c2$144$54f2ab743656618ae51062fd6f2ff07a5078dcf3a1fa52075f50f4508e0c342b1f3e29703f4932c689e29f385f7ad73bf96ec7bb536ea8dafd40b9e5aee6f3e27dc21ee538d9e146a9361fc34ae5dd818b23c106688a451a5e180362954698a35111cef9315ffcd6cb4d440a6899177ff0384a9533923c05f97a5bbd3f94415688ca5c3af97f9edab771dc84807a6bcc", "ecdsa"},
{NULL}
};
#if defined (_OPENMP)
/* Number of OpenMP threads; scaled by OMP_SCALE in init(). */
static int omp_t = 1;
#endif
/* Candidate passwords, one NUL-terminated buffer per key slot. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
/* cracked[i] != 0 when slot i decrypted cleanly; cracked_count = slot count. */
static int *cracked, cracked_count;

/* Parsed salt record for one $PEM$ hash line (the format's "salt"). */
static struct format_context {
	int salt_length;               /* NOTE(review): not set by visible code */
	unsigned char salt[SALTLEN];   /* PBKDF2 salt (8 raw bytes) */
	int iv_length;                 /* NOTE(review): not set by visible code */
	unsigned char iv[IVLEN];       /* 3DES-CBC initialization vector */
	int iterations;                /* PBKDF2-HMAC-SHA1 iteration count */
	int ciphertext_length;         /* encrypted PKCS#8 blob length, bytes */
	unsigned char ciphertext[CTLEN];
} *fctx;
/* One-time format setup: size the per-key buffers (scaled up for OpenMP). */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	/* Give each thread OMP_SCALE key slots to amortize fork/join overhead. */
	omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
	cracked_count = self->params.max_keys_per_crypt;
}
/* Releases the buffers allocated in init(); freed in reverse order. */
static void done(void)
{
	MEM_FREE(cracked);
	MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
int len, value;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += TAG_LENGTH;
if ((p = strtokm(ctcopy, "$")) == NULL) // type
goto err;
value = atoi(p);
if (value != 1)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) // cipher
goto err;
value = atoi(p);
if (value != 1)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) // salt
goto err;
if(hexlenl(p) != 16)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) // iterations
goto err;
if ((p = strtokm(NULL, "$")) == NULL) // iv
goto err;
if(hexlenl(p) != 16)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) // ciphertext length
goto err;
len = atoi(p);
if ((p = strtokm(NULL, "*")) == NULL) // ciphertext
if(hexlenl(p) != len)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
fctx = mem_calloc_tiny(sizeof(struct format_context), MEM_ALIGN_WORD);
ctcopy += TAG_LENGTH;
p = strtokm(ctcopy, "$"); // type
p = strtokm(NULL, "$");
p = strtokm(NULL, "$"); // salt
for (i = 0; i < SALTLEN; i++)
fctx->salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$");
fctx->iterations = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < IVLEN; i++)
fctx->iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "$");
fctx->ciphertext_length = atoi(p);
p = strtokm(NULL, "$");
for (i = 0; i < fctx->ciphertext_length; i++)
fctx->ciphertext[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)fctx;
}
/* Makes the given parsed salt record current for subsequent crypt_all calls. */
static void set_salt(void *salt)
{
	fctx = (struct format_context *)salt;
}
static void PEM_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Returns the candidate password previously stored in slot `index`. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* The decrypted data should have a structure which is similar to,
*
* SEQUENCE(3 elem)
* INTEGER0
* SEQUENCE(2 elem)
* OBJECT IDENTIFIER1.2.840.113549.1.1.1
* NULL
* OCTET STRING(1 elem)
* SEQUENCE(9 elem)
* INTEGER0
* INTEGER(1024 bit) 163583298361518096026606050608205849417059808304583036000248988384009…
* INTEGER65537
* INTEGER(1024 bit) 117735944587247616941254265546766890629007951201899342739151083099399…
* INTEGER(512 bit) 1326824977515584662273167545044211564211924552512566340747744113458170…
* INTEGER(512 bit) 1232892816562888937701591901363879998543675433056414341240275826895052…
* INTEGER(512 bit) 1232481257247299197174170630936058522583110776863565636597653514732029…
* INTEGER(511 bit) 6306589984658176106246573218383922527912198486012975018041565347945398…
* INTEGER(512 bit) 1228874097888952320
*/
/*
 * Attempts to decrypt the PKCS#8 blob with a derived 3DES key and verify
 * that the plaintext looks like a PrivateKeyInfo structure.
 *
 * @param key   24-byte 3DES key (three 8-byte DES keys) from PBKDF2.
 * @param iv    8-byte CBC initialization vector.
 * @param data  encrypted blob of fctx->ciphertext_length bytes.
 * @return 0 when padding and ASN.1 structure check out (probable hit),
 *         -1 otherwise.
 */
static int pem_decrypt(unsigned char *key, unsigned char *iv, unsigned char *data)
{
	unsigned char out[CTLEN];
	DES_cblock key1, key2, key3;
	DES_cblock ivec;
	DES_key_schedule ks1, ks2, ks3;
	struct asn1_hdr hdr;
	const uint8_t *pos, *end;
	int length = fctx->ciphertext_length;

	memset(out, 0, sizeof(out));
	/* Split the 24-byte PBKDF2 output into the three DES subkeys. */
	memcpy(key1, key, 8);
	memcpy(key2, key + 8, 8);
	memcpy(key3, key + 16, 8);
	DES_set_key((DES_cblock *) key1, &ks1);
	DES_set_key((DES_cblock *) key2, &ks2);
	DES_set_key((DES_cblock *) key3, &ks3);
	memcpy(ivec, iv, 8);
	DES_ede3_cbc_encrypt(data, out, fctx->ciphertext_length, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);

	/* First cheap filter: valid PKCS#7 padding on the last block. */
	// padding byte can be 4 / 6 or so on!
	if (check_pkcs_pad(out, fctx->ciphertext_length, 8) < 0)
		return -1;

	/* check message structure, http://lapo.it/asn1js/ is the best tool for learning this stuff */

	// Outermost SEQUENCE (PrivateKeyInfo)
	if (asn1_get_next(out, length, &hdr) < 0 ||
			hdr.class != ASN1_CLASS_UNIVERSAL ||
			hdr.tag != ASN1_TAG_SEQUENCE) {
		goto bad;
	}
	pos = hdr.payload;
	end = pos + hdr.length;

	// version Version (Version ::= INTEGER), must be a single zero byte
	if (asn1_get_next(pos, end - pos, &hdr) < 0 ||
			hdr.class != ASN1_CLASS_UNIVERSAL ||
			hdr.tag != ASN1_TAG_INTEGER) {
		goto bad;
	}
	if (*(pos + 2) != 0) // *(pos + 1) == header length
		goto bad;
	if (hdr.length != 1)
		goto bad;
	pos = hdr.payload + hdr.length;
	if (hdr.payload[0] != 0)
		goto bad;

	// SEQUENCE (AlgorithmIdentifier)
	// NOTE(review): `length` here is the full blob length rather than the
	// remaining bytes (end - pos); harmless for well-formed data but worth
	// confirming against upstream.
	if (asn1_get_next(pos, length, &hdr) < 0 ||
			hdr.class != ASN1_CLASS_UNIVERSAL ||
			hdr.tag != ASN1_TAG_SEQUENCE) {
		goto bad;
	}
	pos = hdr.payload; /* go inside this sequence */

	// OBJECT IDENTIFIER (with value 1.2.840.113549.1.1.1, 1.2.840.10040.4.1 for DSA)
	if (asn1_get_next(pos, length, &hdr) < 0 ||
			hdr.class != ASN1_CLASS_UNIVERSAL ||
			hdr.tag != ASN1_TAG_OID) {
		goto bad;
	}
	/* Accept the 1.2.840.113549.* (RSA) and 1.2.840.10040/10045.* (DSA/EC)
	 * arc prefixes. */
	if ((memcmp(hdr.payload, "\x2a\x86\x48\x86", 4) != 0) && (memcmp(hdr.payload, "\x2a\x86\x48\xce", 4) != 0))
		goto bad;

	return 0;
bad:
	return -1;
}
/*
 * Derives a 24-byte 3DES key from each candidate password with
 * PBKDF2-HMAC-SHA1 and marks cracked[] for every key that decrypts the
 * current salt's blob into plausible PKCS#8.
 *
 * Bug fix: the `for` statement was previously INSIDE the #ifdef _OPENMP
 * guard, so a non-OpenMP build compiled to a single pass over index 0 and
 * silently skipped the remaining candidates whenever count exceeded
 * MAX_KEYS_PER_CRYPT. Only the pragma is conditional now.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	memset(cracked, 0, sizeof(cracked[0]) * cracked_count);

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		unsigned char master[MAX_KEYS_PER_CRYPT][32];
		int i;
#ifdef SIMD_COEF_32
		/* SIMD path: run SSE_GROUP_SZ_SHA1 PBKDF2 lanes at once. */
		int lens[MAX_KEYS_PER_CRYPT];
		unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			lens[i] = strlen(saved_key[index+i]);
			pin[i] = (unsigned char*)saved_key[index+i];
			pout[i] = master[i];
		}
		pbkdf2_sha1_sse((const unsigned char**)pin, lens, fctx->salt, SALTLEN, fctx->iterations, pout, 24, 0);
#else
		pbkdf2_sha1((unsigned char *)saved_key[index], strlen(saved_key[index]), fctx->salt, SALTLEN, fctx->iterations, master[0], 24, 0);
#endif
		/* 0 from pem_decrypt means padding + ASN.1 checks passed. */
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			if (pem_decrypt(master[i], fctx->iv, fctx->ciphertext) == 0)
				cracked[index+i] = 1;
			else
				cracked[index+i] = 0;
		}
	}
	return count;
}
/* Returns 1 if any candidate in the current batch decrypted cleanly. */
static int cmp_all(void *binary, int count)
{
	int i = 0;

	while (i < count) {
		if (cracked[i])
			return 1;
		++i;
	}
	return 0;
}
/* Per-candidate check: crypt_all() already recorded the verdict. */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* No further verification possible beyond pem_decrypt(); always confirm. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
static unsigned int iteration_count(void *salt)
{
struct format_context *my_fctx;
my_fctx = salt;
return (unsigned int) my_fctx->iterations;
}
/* Format descriptor registered with the john core (see formats.h for the
 * meaning of each slot; defaults are the fmt_default_* stubs). */
struct fmt_main fmt_pem = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ /* tunable cost names, matched by the methods.tunable_costs hooks */
			"iteration count",
		},
		PEM_tests
	}, { /* methods */
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{ /* tunable cost value extractors */
			iteration_count,
		},
		fmt_default_source,
		{ /* binary_hash: unused, BINARY_SIZE is 0 */
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		PEM_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{ /* get_hash: unused, comparison goes through cracked[] */
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
pompi.h | /*
The MIT License (MIT)
Copyright (c) 2016 Lukáš Slouka
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/**
* @file pompi.h
* @brief This file contains all necessary tools for successful monitoring
* of OpenMP multithreaded applications using Performance Application
* Programmable Interface (PAPI) in C++ code. For all intents and purposes
* it acts like a wrapper.
* @author Lukas Slouka <lukslouka@gmail.com>
* @version 1.0
*/
#pragma once
#include <papi.h>
#include <vector>
#include <string>
#include <stdio.h>
#include <cstring>
#include <algorithm>
#ifdef _OPENMP
#include <omp.h>
#endif
/// pompi namespace.
namespace pompi
{
/// Used in methods that support output on either only one thread or all threads.
#define ALL_THREADS -1
// Arbitrary boundary for max events
#define MAX_EVENTS 20
// Per-thread PAPI event set handle (each OpenMP thread owns one event set).
int g_eventset = PAPI_NULL;
#ifdef _OPENMP
#pragma omp threadprivate(g_eventset)
#endif
// Per-thread counter snapshot taken when counting starts.
long long g_start_counter_values[MAX_EVENTS];
#ifdef _OPENMP
#pragma omp threadprivate(g_start_counter_values)
#endif
// Per-thread counter snapshot taken when counting stops.
long long g_end_counter_values[MAX_EVENTS];
#ifdef _OPENMP
// Bug fix: this pragma previously named g_start_counter_values a second
// time, which left g_end_counter_values SHARED across threads — a data race
// whenever multiple threads stopped their counters concurrently.
#pragma omp threadprivate(g_end_counter_values)
#endif
// Add new output formats here
/**
* Output formats enum.
* Contains all available kinds of output format.
*/
enum OutputFormat
{
    GNUPLOT, /**< Formats performance data to be usable with GNUplot. */
    // Add new output formats above this line and handle them in
    // PrintResultsToFile().
};
// Add new derived stats here
/**
 * Derived statistics available from PAPI counters.
 * Contains all available statistics derived from standard PAPI events.
 */
enum PapiDerivedStat
{
    D_L1_TMR, /**< Derived L1 cache total missrate [%]*/
    D_L2_TMR, /**< Derived L2 cache total missrate [%]*/
    D_L3_TMR, /**< Derived L3 cache total missrate [%]*/
    D_L1_DMR, /**< Derived L1 cache data missrate [%]*/
    D_L2_DMR, /**< Derived L2 cache data missrate [%]*/
    D_L3_DMR, /**< Derived L3 cache data missrate [%]*/
    D_GIPC, /**< Derived graduated instructions per second — NOTE(review): name suggests per *cycle* (IPC); confirm in ComputeDerivedStat() */
    D_IIPC, /**< Derived issued instructions per second — NOTE(review): name suggests per *cycle* (IPC); confirm in ComputeDerivedStat() */
    D_BR_MPR, /**< Derived branch misprediction rate [%]*/
    D_MFLOPS, /**< Derived MFLOPS*/
    D_GFLOPS, /**< Derived GFLOPS*/
    D_MIPS, /**< Derived MIPS*/
    D_GIPS, /**< Derived GIPS*/
    D_TLBM_PC, /**< Derived TLB data misses per count */
    D_TLBM_PS, /**< Derived TLB data misses per second */
};
/**
* Pompi timer class
* Timer class that can be used by itself, but is part of wrapper
*/
class PompiTimer
{
    private:

        /** Wall-clock (omp_get_wtime) and PAPI real time (usec) of the first
         *  BeginTiming() call since the latest reset. */
        double first_call_time_;
        long long first_call_time_usec_;

        /** Times of the most recent EndTiming() call. */
        double last_call_time_;
        long long last_call_time_usec_;

        /** Times of the most recent BeginTiming() call (does not change the
         *  first-call times once set). */
        double inner_call_time_;
        long long inner_call_time_usec_;

        /** Sum of all Begin/End interval durations since the latest reset. */
        double inner_calls_sum_;
        long long inner_calls_sum_usec_;

        /** Number of completed Begin/End pairs since the latest reset. */
        unsigned long int repetition_count_;

    public:

        /**
         * A constructor.
         * Initializes all counters to zero.
         */
        PompiTimer()
        {
            first_call_time_ = 0.0f;
            first_call_time_usec_ = 0;
            last_call_time_ = 0.0f;
            last_call_time_usec_ = 0;
            inner_call_time_ = 0.0f;
            inner_call_time_usec_ = 0;
            inner_calls_sum_ = 0.0f;
            inner_calls_sum_usec_ = 0;
            repetition_count_ = 0;
        }

        /**
         * Begins timing.
         * Records the inner (per-interval) start time; the very first call
         * after a reset also becomes the first-call time.
         */
        void BeginTiming()
        {
            #ifdef _OPENMP
                inner_call_time_ = omp_get_wtime();
                if(first_call_time_ == 0.0f)
                    first_call_time_ = inner_call_time_;
            #endif

            inner_call_time_usec_ = PAPI_get_real_usec();
            if(first_call_time_usec_ == 0)
                first_call_time_usec_ = inner_call_time_usec_;
        }

        /**
         * Ends timing.
         * Accumulates the just-finished interval and bumps the repetition
         * count.
         */
        void EndTiming()
        {
            #ifdef _OPENMP
                last_call_time_ = omp_get_wtime();
                inner_calls_sum_ += last_call_time_ - inner_call_time_;
            #endif

            last_call_time_usec_ = PAPI_get_real_usec();
            inner_calls_sum_usec_ += last_call_time_usec_ - inner_call_time_usec_;
            repetition_count_++;
        }

        /**
         * Clears entire timer.
         */
        void ResetTimer()
        {
            first_call_time_ = 0.0f;
            first_call_time_usec_ = 0;
            last_call_time_ = 0.0f;
            last_call_time_usec_ = 0;
            inner_call_time_ = 0.0f;
            inner_call_time_usec_ = 0;
            inner_calls_sum_ = 0.0f;
            inner_calls_sum_usec_ = 0;
            repetition_count_ = 0;
        }

        /**
         * @return First timer call since latest reset.
         */
        double GetFirstCallTime()
        {
            // Without OpenMP only the PAPI microsecond clock is available.
            #ifndef _OPENMP
                return first_call_time_usec_ / 1e6;
            #endif
            return first_call_time_;
        }

        long long GetFirstCallTimeUsec()
        {
            return first_call_time_usec_;
        }

        /**
         * @return Last timer call since latest reset.
         */
        double GetLastCallTime()
        {
            #ifndef _OPENMP
                return last_call_time_usec_ / 1e6;
            #endif
            return last_call_time_;
        }

        long long GetLastCallTimeUsec()
        {
            return last_call_time_usec_;
        }

        /**
         * @return Number of Begin/End pairs since latest reset.
         */
        double GetRepetitionCount()
        {
            return repetition_count_;
        }

        void SetRepetitionCount(unsigned long int count)
        {
            repetition_count_ = count;
        }

        /**
         * @return Sum of all calls since latest reset.
         */
        double GetAggregatedTime()
        {
            #ifndef _OPENMP
                return inner_calls_sum_usec_ / 1e6;
            #endif
            return inner_calls_sum_;
        }

        long long GetAggregatedTimeUsec()
        {
            return inner_calls_sum_usec_;
        }

        /**
         * @return Duration between first and last call since latest reset.
         */
        double GetTotalTime()
        {
            #ifndef _OPENMP
                return GetTotalTimeUsec() / 1e6;
            #endif
            return last_call_time_ - first_call_time_;
        }

        long long GetTotalTimeUsec()
        {
            return last_call_time_usec_ - first_call_time_usec_;
        }

        /**
         * @return Average time over aggregated time, or 0 before any timing.
         */
        double GetAverageTimeOverAggregated()
        {
            // Bug fix: guard against division by zero when no Begin/End pair
            // has completed (integer division by zero is undefined behavior
            // in the usec variant, NaN in the double variant).
            if(repetition_count_ == 0)
                return 0.0;
            #ifndef _OPENMP
                return GetAverageTimeOverAggregatedUsec() / 1e6;
            #endif
            return inner_calls_sum_/repetition_count_;
        }

        double GetAverageTimeOverAggregatedUsec()
        {
            if(repetition_count_ == 0)
                return 0.0;
            return inner_calls_sum_usec_/repetition_count_;
        }

        /**
         * @return Average time over total time, or 0 before any timing.
         */
        double GetAverageTimeOverTotal()
        {
            if(repetition_count_ == 0)
                return 0.0;
            #ifndef _OPENMP
                return GetAverageTimeOverTotalUsec() / 1e6;
            #endif
            return GetTotalTime()/repetition_count_;
        }

        double GetAverageTimeOverTotalUsec()
        {
            if(repetition_count_ == 0)
                return 0.0;
            return GetTotalTimeUsec()/repetition_count_;
        }
};
/**
* Pompi Base class.
* This class is heart and soul of pompi wrapper. Contains all methods
* and attributes needed for successful monitoring of performance.
*/
class Base
{
    private:

        /**
         * Pompi timer.
         * Used for all time measurements.
         * @see Start()
         * @see Stop()
         */
        PompiTimer timer_;

        /**
         * Vector of papi event codes.
         * Dependant on PAPI_EVENTS environment variable.
         */
        std::vector< int > papi_events_;

        /**
         * Vector of papi event names.
         * Content corresponds with papi_events_.
         */
        std::vector< std::string > papi_event_names_;

        /**
         * Vector of thread hardware counter values.
         * Filled by each thread after call to Stop.
         * Outer index: thread id; inner index: event (parallel to papi_events_).
         * @see ClearCounters()
         * @see GetCounters()
         * @see Stop()
         */
        std::vector< std::vector< long long > > thread_data_;

        /**
         * Maximum number of threads.
         * Initialized by call to omp_get_max_threads().
         */
        int max_threads_;

        /**
         * Maximum number of events according to number of available hw counters.
         */
        int hw_counters_;

    public:

        /**
         * Base constructor.
         * Initializes PAPI library and PAPI library threading
         * support for OpenMP. Parses PAPI_EVENTS environment variable
         * and modifies papi_events_ and papi_event_names_ accordingly.
         */
        Base();

        /**
         * Base destructor.
         */
        ~Base()
        {
            PAPI_shutdown();
        }

    public:

        /**
         * Tells pompi to monitor additional event.
         * @param event C string name of PAPI event.
         */
        void AddEvent(char * event);

        /**
         * Starts counting of all monitored events on all threads.
         * This method is complemented by Stop method.
         * @warning Must be called inside of a OpenMP parallel region.
         * @see Stop()
         */
        void Start();

        /**
         * Stops counting of all monitored events on all threads.
         * This method is complemented by Start method.
         * @warning Must be called inside of a OpenMP parallel region.
         * @see Start()
         */
        void Stop();

        /**
         * Sets repetition count.
         * @param count new count.
         */
        void SetRepetitionCount(unsigned long int count);

        /**
         * Provides accumulated execution time between all pairs of Start()
         * and Stop() methods. This behaviour is reset by ClearTimers().
         * @return execution time.
         * @see Start()
         * @see Stop()
         * @see ClearTimers()
         */
        double GetExecutionTime();

        /**
         * Similar to GetExecutionTime. Used to calculate execution time
         * averaged on number of trials.
         * @return average execution time.
         * @see GetExecutionTime()
         */
        double GetAverageExecutionTime();

        /**
         * Sets all counters to zero. Number of counter depends on
         * maximum possible threads and count of monitored papi events.
         * Can be used to clear counters of any singular thread or all of them.
         * @param thread_id Integer ID of a thread. If thread with thread_id
         * does not exist, all counters are cleared.
         */
        void ClearCounters(int thread_id = ALL_THREADS);

        /**
         * Sets both execution time timers to zero.
         */
        void ClearTimers();

        /**
         * Prints results on standard output. Can be used to output counters
         * of any singular thread or all threads aggregated (implicit).
         * @param value Any value selected by user (eg. total number of threads).
         * @param thread_id ID of a thread to be outputted (invalid thread ID
         * results in aggregated output on all threads).
         */
        template < typename T >
        void PrintResults(T value, int thread_id = ALL_THREADS);

        /**
         * Similar to PrintResults. Takes additional file description
         * parameters to define output file.
         * @param value Any value selected by user (eg. total number of threads).
         * @param file_name C string name of a output file.
         * @param format Output format.
         * @param thread_id ID of a thread to be outputted (invalid thread ID
         * results in aggregated output on all threads).
         */
        template < typename T >
        void PrintResultsToFile(T value, const char * file_name, OutputFormat format, int thread_id = ALL_THREADS);

    private:

        /**
         * Private method used to obtain counters.
         * @param counters array of values where counters are to be saved.
         * @param thread_id Counters of a thread with this ID will be extracted.
         */
        void GetCounters(long long * counters, int thread_id = ALL_THREADS);

        /**
         * Private method used to get index of an event within papi_events_.
         * @param event_code Integer event code.
         * @return index.
         */
        int GetEventIndex(int event_code);

        /**
         * Private predicate checking availablity of PAPI event within
         * monitored events.
         * @param event_code Integer event code.
         * @return Boolean result of predicate.
         */
        bool EventAvailable(int event_code);

        /**
         * Private method obtaining all derived event that are available.
         * @param stats Vector filled with derived stats.
         */
        void GetDerivedStats(std::vector< PapiDerivedStat > & stats);

        /**
         * Private method translating derived stat code to string.
         * @param stat Derived stat code.
         * @return String representation of derived stat.
         */
        std::string GetDerivedStatName(PapiDerivedStat stat);

        /**
         * Private generic method that computes derived stat value.
         * @param stat Derived stat to be computed.
         * @param thread_id Derived stat will be computed for a thread with
         * this thread_id. In case thread_id is invalid,
         * the stat will be computed aggregated for all threads.
         * @return Derived stat value.
         */
        double ComputeDerivedStat(PapiDerivedStat stat, int thread_id = ALL_THREADS);

        /**
         * Private generic method defining print format for GNUPLOT.
         * @param value Any value selected by user (eg. total number of threads).
         * @param output Output stream.
         * @param thread_id Output will be performed for thread with this ID.
         * In case thread_id is invalid, output will be
         * performed aggregated for all threads.
         */
        template < typename T >
        void PrintGnuplot(T value, FILE * output, int thread_id = ALL_THREADS);
};
/////////////////////////////////////
// Implementation PUBLIC METHODS //
/////////////////////////////////////
/**
 * Base constructor.
 * Initializes the PAPI library (and its OpenMP threading support), sizes the
 * per-thread data storage, and registers every event listed in the
 * PAPI_EVENTS environment variable (pipe-separated names).
 */
Base::Base()
{
    #ifdef _OPENMP
        // Bug fix: was temporarily hard-coded to 24 (debug leftover); use the
        // real OpenMP thread limit again.
        max_threads_ = omp_get_max_threads();
        printf("[Log] OpenMP has been found, Maximum number of threads is %d\n", max_threads_);
    #else
        max_threads_ = 1;
        printf("[Log] OpenMP not found, POMPI will be operating in single thread mode\n");
    #endif

    thread_data_.resize(max_threads_);
    timer_.ResetTimer();

    int PAPI_return_value = PAPI_library_init(PAPI_VER_CURRENT);
    if(PAPI_return_value != PAPI_VER_CURRENT)
    {
        fprintf(stderr, "[Error] Could not initialize PAPI library\n");
        exit(1);
    }
    printf("[Log] PAPI library successfully initialized\n");

    // Cap the usable event count at our fixed-size buffers.
    hw_counters_ = PAPI_num_counters();
    if(hw_counters_ > MAX_EVENTS)
        hw_counters_ = MAX_EVENTS;
    printf("[Log] Maximum number of events is %d\n", hw_counters_);

    #ifdef _OPENMP
        PAPI_return_value = PAPI_thread_init((unsigned long (*)(void)) (omp_get_thread_num));
        if(PAPI_return_value != PAPI_OK)
        {
            fprintf(stderr, "[Error] Could not initialize OpenMP threading for PAPI\n");
            exit(1);
        }
        printf("[Log] OpenMP threading for PAPI successfully initialized\n");
    #endif

    // Resolving PAPI events from PAPI_EVENTS environment variable.
    // Bug fix: getenv() returns NULL when the variable is unset; the old code
    // handed that NULL straight to strtok(), which is undefined behavior.
    char * papi_events = getenv("PAPI_EVENTS");
    if(papi_events == NULL)
    {
        fprintf(stderr, "[Warning] PAPI_EVENTS not set, no events will be monitored\n");
        return;
    }
    char * event = strtok(papi_events, "|");
    while(event != NULL)
    {
        AddEvent(event);
        event = strtok(NULL, "|");
    }
}
/**
 * Registers a single PAPI event by name.
 * Skips the event (with a warning) when the hardware-counter budget is
 * full, the name cannot be resolved, or the event is already registered.
 * @param event NUL-terminated PAPI event name, e.g. "PAPI_TOT_INS".
 */
void Base::AddEvent(char * event)
{
    // Must be >=: with the previous '>' one event more than hw_counters_
    // could be registered, and that extra event can never be counted.
    // The cast avoids a signed/unsigned comparison (and also refuses all
    // events when PAPI_num_counters() reported an error, i.e. < 0).
    if((int)papi_events_.size() >= hw_counters_)
    {
        fprintf(stderr, "[Warning] Cannot add any more events, skipping %s\n", event);
        return;
    }
    int event_id;
    int PAPI_return_value = PAPI_event_name_to_code(event, &event_id);
    if(PAPI_return_value != PAPI_OK)
        fprintf(stderr, "[Warning] Papi event `%s` does not exist, skipping\n", event);
    else
    {
        // Ignore duplicates so each hardware counter is used at most once.
        if(std::find(papi_events_.begin(), papi_events_.end(), event_id) == papi_events_.end())
        {
            papi_event_names_.push_back(std::string(event));
            papi_events_.push_back(event_id);
            // Every thread gets a fresh accumulator slot for the new event.
            for(int i = 0; i < max_threads_; ++i)
                thread_data_[i].push_back(0);
            printf("[Log] Adding papi event `%s`\n", event);
        }
        else
            fprintf(stderr, "[Warning] Papi event `%s` already listed, skipping\n", event);
    }
}
// Begins a measurement section: starts the wall-clock timer, then on every
// thread creates a PAPI event set, adds all registered events, starts
// counting, and takes a baseline read.
// NOTE(review): g_eventset / g_start_counter_values appear to be globals
// declared elsewhere (presumably threadprivate under OpenMP) — confirm that
// g_eventset is initialized to PAPI_NULL before PAPI_create_eventset is
// called, as the PAPI API requires.
void Base::Start()
{
timer_.BeginTiming();
#ifdef _OPENMP
#pragma omp parallel
{
// Each OpenMP thread registers itself with PAPI and builds its own
// event set so counters are collected per thread.
PAPI_register_thread();
int thread_id = PAPI_thread_id();
int PAPI_return_value = PAPI_create_eventset(&g_eventset);
if(PAPI_return_value != PAPI_OK)
{
#pragma omp critical
fprintf(stderr, "[Error] Failed to initialize PAPI event set for thread #%d\n", thread_id);
}
PAPI_return_value = PAPI_add_events(g_eventset, &papi_events_[0], papi_events_.size());
if(PAPI_return_value != PAPI_OK)
{
#pragma omp critical
fprintf(stderr, "[Error] Failed to add events to event set for thread #%d\n", thread_id);
}
PAPI_return_value = PAPI_start(g_eventset);
if(PAPI_return_value != PAPI_OK)
{
#pragma omp critical
fprintf(stderr, "[Error] Failed to start counting in thread #%d\n", thread_id);
}
// Baseline read immediately after PAPI_start (which resets the
// counters), so these values are expected to be near zero.
PAPI_return_value = PAPI_read(g_eventset, g_start_counter_values);
if(PAPI_return_value != PAPI_OK)
{
#pragma omp critical
fprintf(stderr, "[Error] Failed to read counters in thread #%d\n", thread_id);
}
}
#else
// Single-threaded variant: identical create/add/start/read sequence
// without the parallel region.
int PAPI_return_value = PAPI_create_eventset(&g_eventset);
if(PAPI_return_value != PAPI_OK)
fprintf(stderr, "[Error] Failed to initialize PAPI event set\n");
PAPI_return_value = PAPI_add_events(g_eventset, &papi_events_[0], papi_events_.size());
if(PAPI_return_value != PAPI_OK)
fprintf(stderr, "[Error] Failed to add events to event set\n");
PAPI_return_value = PAPI_start(g_eventset);
if(PAPI_return_value != PAPI_OK)
fprintf(stderr, "[Error] Failed to start counting\n");
PAPI_return_value = PAPI_read(g_eventset, g_start_counter_values);
if(PAPI_return_value != PAPI_OK)
fprintf(stderr, "[Error] Failed to read counters\n");
#endif
}
// Ends a measurement section: reads the final counter values on every
// thread, accumulates them into thread_data_, then stops counting, cleans
// up and destroys the per-thread event set, and finally stops the timer.
// NOTE(review): the values read into g_end_counter_values are added to
// thread_data_ without subtracting g_start_counter_values taken in
// Start(); this is only correct if PAPI_start reset the counters to zero —
// confirm, otherwise the baseline read in Start() is dead data.
void Base::Stop()
{
#ifdef _OPENMP
#pragma omp parallel
{
int thread_id = PAPI_thread_id();
int PAPI_return_value = PAPI_read(g_eventset, g_end_counter_values);
if(PAPI_return_value != PAPI_OK)
{
#pragma omp critical
fprintf(stderr, "[Error] Failed to read counters in thread #%d\n", thread_id);
}
// Accumulate this thread's counts so repeated Start/Stop sections sum up.
for(int event = 0; event < papi_events_.size(); ++event)
thread_data_[thread_id][event] += g_end_counter_values[event];
PAPI_return_value = PAPI_stop(g_eventset, NULL);
if(PAPI_return_value != PAPI_OK)
{
#pragma omp critical
fprintf(stderr, "[Error] Failed to stop counting in thread #%d\n", thread_id);
}
PAPI_return_value = PAPI_cleanup_eventset(g_eventset);
if(PAPI_return_value != PAPI_OK)
{
#pragma omp critical
fprintf(stderr, "[Error] Failed to clean up event set in thread #%d\n", thread_id);
}
PAPI_return_value = PAPI_destroy_eventset(&g_eventset);
if(PAPI_return_value != PAPI_OK)
{
#pragma omp critical
fprintf(stderr, "[Error] Failed to destroy event set in thread #%d\n", thread_id);
}
PAPI_unregister_thread();
}
#else
// Single-threaded variant: read/accumulate/stop/cleanup/destroy, thread 0.
int PAPI_return_value = PAPI_read(g_eventset, g_end_counter_values);
if(PAPI_return_value != PAPI_OK)
fprintf(stderr, "[Error] Failed to read counters\n");
for(int event = 0; event < papi_events_.size(); ++event)
thread_data_[0][event] += g_end_counter_values[event];
PAPI_return_value = PAPI_stop(g_eventset, NULL);
if(PAPI_return_value != PAPI_OK)
fprintf(stderr, "[Error] Failed to stop counting\n");
PAPI_return_value = PAPI_cleanup_eventset(g_eventset);
if(PAPI_return_value != PAPI_OK)
fprintf(stderr, "[Error] Failed to clean up event set\n");
PAPI_return_value = PAPI_destroy_eventset(&g_eventset);
if(PAPI_return_value != PAPI_OK)
fprintf(stderr, "[Error] Failed to destroy event set\n");
#endif
timer_.EndTiming();
}
// Forwards the repetition count to the timer; used by per-call derived
// stats such as D_TLBM_PC to normalize totals by the number of repetitions.
void Base::SetRepetitionCount(unsigned long int count)
{
timer_.SetRepetitionCount(count);
}
// Total wall-clock time accumulated over all Start()/Stop() sections.
double Base::GetExecutionTime()
{
return timer_.GetAggregatedTime();
}
// Aggregated wall-clock time divided by the configured repetition count
// (see SetRepetitionCount).
double Base::GetAverageExecutionTime()
{
return timer_.GetAverageTimeOverAggregated();
}
/**
 * Resets accumulated counter values.
 * @param thread_id Counters of this thread are cleared; any invalid id
 *                  (negative or >= max_threads_) clears all threads.
 */
void Base::ClearCounters(int thread_id)
{
    // Valid ids are 0 .. max_threads_-1, so the bound must be >=.
    // The previous check (thread_id > max_threads_) let
    // thread_id == max_threads_ fall into the single-thread branch and
    // index one past the end of thread_data_.
    if((thread_id < 0)||(thread_id >= max_threads_))
    {
        for(int thread = 0; thread < max_threads_; ++thread)
            for(size_t event = 0; event < papi_events_.size(); ++event)
                thread_data_[thread][event] = 0;
    }
    else
    {
        for(size_t event = 0; event < papi_events_.size(); ++event)
            thread_data_[thread_id][event] = 0;
    }
}
// Resets the wall-clock timer (aggregated time and repetition bookkeeping).
void Base::ClearTimers()
{
timer_.ResetTimer();
}
//////////////////////////////////////
// Implementation PRIVATE methods //
//////////////////////////////////////
/**
 * Copies accumulated counter values into `counters`.
 * @param counters  Caller-provided array with at least papi_events_.size()
 *                  slots; filled with per-event totals.
 * @param thread_id Source thread; ALL_THREADS or any out-of-range id
 *                  yields the sum over all threads.
 */
void Base::GetCounters(long long * counters, int thread_id)
{
    // Treat every invalid id as "aggregate over all threads". This matches
    // the documented contract of the callers ("in case thread_id is
    // invalid, the stat will be computed aggregated") and removes the
    // out-of-bounds access the old code performed for ids such as
    // thread_id == max_threads_, which only compared against ALL_THREADS.
    if((thread_id == ALL_THREADS)||(thread_id < 0)||(thread_id >= max_threads_))
    {
        for(size_t event = 0; event < papi_events_.size(); ++event)
            counters[event] = 0;
        for(int thread = 0; thread < max_threads_; ++thread)
            for(size_t event = 0; event < papi_events_.size(); ++event)
                counters[event] += thread_data_[thread][event];
    }
    else
    {
        // Direct copy from one thread; no prior zeroing needed.
        for(size_t event = 0; event < papi_events_.size(); ++event)
            counters[event] = thread_data_[thread_id][event];
    }
}
// Reports whether the given PAPI event code was registered via AddEvent().
bool Base::EventAvailable(int event_code)
{
    return std::find(papi_events_.begin(), papi_events_.end(), event_code)
           != papi_events_.end();
}
// Maps a PAPI event code to its slot in the counter arrays.
// Unknown codes fall back to slot 0 (same behavior as before) — callers
// are expected to check EventAvailable() first.
int Base::GetEventIndex(int event_code)
{
    int slot = 0;
    for(std::vector<int>::iterator it = papi_events_.begin();
        it != papi_events_.end(); ++it, ++slot)
    {
        if(*it == event_code)
            return slot;
    }
    return 0;
}
// Add new derived stats here
// Appends every derived statistic whose required raw PAPI events have all
// been registered. The push_back order below defines the reporting order
// used by PrintResults / PrintGnuplot.
void Base::GetDerivedStats(std::vector< PapiDerivedStat > &stats)
{
// L1 total miss rate needs loads + stores as the access denominator.
if (EventAvailable(PAPI_LD_INS) && EventAvailable(PAPI_SR_INS) && EventAvailable(PAPI_L1_TCM))
stats.push_back(D_L1_TMR);
if (EventAvailable(PAPI_L2_TCA) && EventAvailable(PAPI_L2_TCM))
stats.push_back(D_L2_TMR);
if (EventAvailable(PAPI_L3_TCA) && EventAvailable(PAPI_L3_TCM))
stats.push_back(D_L3_TMR);
if (EventAvailable(PAPI_L1_DCA) && EventAvailable(PAPI_L1_DCM))
stats.push_back(D_L1_DMR);
if (EventAvailable(PAPI_L2_DCA) && EventAvailable(PAPI_L2_DCM))
stats.push_back(D_L2_DMR);
if (EventAvailable(PAPI_L3_DCA) && EventAvailable(PAPI_L3_DCM))
stats.push_back(D_L3_DMR);
// Branch misprediction rate: either conditional-branch counts or
// correctly-predicted counts can serve as the denominator.
if (( EventAvailable(PAPI_BR_MSP) && EventAvailable(PAPI_BR_CN) )||
( EventAvailable(PAPI_BR_MSP) && EventAvailable(PAPI_BR_PRC) ))
stats.push_back(D_BR_MPR);
if (EventAvailable(PAPI_TOT_INS) && EventAvailable(PAPI_TOT_CYC))
stats.push_back(D_GIPC);
if (EventAvailable(PAPI_TOT_IIS) && EventAvailable(PAPI_TOT_CYC))
stats.push_back(D_IIPC);
// Rate stats below only need a single raw event plus the wall-clock time.
if (EventAvailable(PAPI_FP_INS))
{
stats.push_back(D_MFLOPS);
stats.push_back(D_GFLOPS);
}
if (EventAvailable(PAPI_TOT_INS))
{
stats.push_back(D_MIPS);
stats.push_back(D_GIPS);
}
if (EventAvailable(PAPI_TLB_DM))
{
stats.push_back(D_TLBM_PC);
stats.push_back(D_TLBM_PS);
}
}
/**
 * Computes one derived statistic from the accumulated raw counters.
 * @param stat      Derived stat to compute (callers should only request
 *                  stats reported by GetDerivedStats, so the raw events
 *                  each formula needs are guaranteed to be registered).
 * @param thread_id Thread to compute for; invalid ids aggregate all threads.
 * @return Derived stat value, or 0 for an unknown stat / no events.
 */
double Base::ComputeDerivedStat(PapiDerivedStat stat, int thread_id)
{
    // Guard: with no registered events there is nothing to compute (and
    // no storage to read).
    if (papi_events_.empty())
        return 0;
    // std::vector instead of the previous variable-length array, which is
    // a non-standard compiler extension in C++.
    std::vector<long long> counters(papi_events_.size(), 0);
    GetCounters(&counters[0], thread_id);
    // Add new derived stat computations here.
    // NOTE(review): denominators (e.g. PAPI_L2_TCA) can be zero for an
    // empty measurement section, yielding inf/nan — confirm acceptable.
    switch(stat)
    {
        case D_L1_TMR: {
            return 100.0 * (counters[GetEventIndex(PAPI_L1_TCM)] / (double)(counters[GetEventIndex(PAPI_SR_INS)] + counters[GetEventIndex(PAPI_LD_INS)]));
        }
        case D_L2_TMR: {
            return 100.0 * (counters[GetEventIndex(PAPI_L2_TCM)] / (double)counters[GetEventIndex(PAPI_L2_TCA)]);
        }
        case D_L3_TMR: {
            return 100.0 * (counters[GetEventIndex(PAPI_L3_TCM)] / (double)counters[GetEventIndex(PAPI_L3_TCA)]);
        }
        case D_L1_DMR: {
            return 100.0 * (counters[GetEventIndex(PAPI_L1_DCM)] / (double)counters[GetEventIndex(PAPI_L1_DCA)]);
        }
        case D_L2_DMR: {
            return 100.0 * (counters[GetEventIndex(PAPI_L2_DCM)] / (double)counters[GetEventIndex(PAPI_L2_DCA)]);
        }
        case D_L3_DMR: {
            return 100.0 * (counters[GetEventIndex(PAPI_L3_DCM)] / (double)counters[GetEventIndex(PAPI_L3_DCA)]);
        }
        case D_BR_MPR: {
            // Prefer conditional-branch counts; otherwise reconstruct the
            // total from mispredicted + correctly-predicted branches.
            if(EventAvailable(PAPI_BR_CN))
                return 100.0 * (counters[GetEventIndex(PAPI_BR_MSP)] / (double)counters[GetEventIndex(PAPI_BR_CN)]);
            else
                return 100.0 * (counters[GetEventIndex(PAPI_BR_MSP)] / (double)(counters[GetEventIndex(PAPI_BR_MSP)] + counters[GetEventIndex(PAPI_BR_PRC)]));
        }
        case D_GIPC: {
            return (static_cast<double>(counters[GetEventIndex(PAPI_TOT_INS)]) / counters[GetEventIndex(PAPI_TOT_CYC)]);
        }
        case D_IIPC: {
            return (static_cast<double>(counters[GetEventIndex(PAPI_TOT_IIS)]) / counters[GetEventIndex(PAPI_TOT_CYC)]);
        }
        case D_MFLOPS: {
            return (static_cast<double>(counters[GetEventIndex(PAPI_FP_INS)]) / timer_.GetAggregatedTime()) / 1e6;
        }
        case D_GFLOPS: {
            return (static_cast<double>(counters[GetEventIndex(PAPI_FP_INS)]) / timer_.GetAggregatedTime()) / 1e9;
        }
        case D_MIPS: {
            return (static_cast<double>(counters[GetEventIndex(PAPI_TOT_INS)]) / timer_.GetAggregatedTime()) / 1e6;
        }
        case D_GIPS: {
            return (static_cast<double>(counters[GetEventIndex(PAPI_TOT_INS)]) / timer_.GetAggregatedTime()) / 1e9;
        }
        case D_TLBM_PC: {
            return (static_cast<double>(counters[GetEventIndex(PAPI_TLB_DM)]) / timer_.GetRepetitionCount());
        }
        case D_TLBM_PS: {
            return (static_cast<double>(counters[GetEventIndex(PAPI_TLB_DM)]) / timer_.GetAggregatedTime());
        }
    }
    return 0;
}
// Add new derived stat translations here
// Translates a derived stat enumerator into its printable name; unknown
// values map to the empty string.
std::string Base::GetDerivedStatName(PapiDerivedStat stat)
{
    switch(stat)
    {
        case D_L1_TMR:  return "D_L1_TMR";
        case D_L2_TMR:  return "D_L2_TMR";
        case D_L3_TMR:  return "D_L3_TMR";
        case D_L1_DMR:  return "D_L1_DMR";
        case D_L2_DMR:  return "D_L2_DMR";
        case D_L3_DMR:  return "D_L3_DMR";
        case D_BR_MPR:  return "D_BR_MPR";
        case D_GIPC:    return "D_GIPC";
        case D_IIPC:    return "D_IIPC";
        case D_MFLOPS:  return "D_MFLOPS";
        case D_GFLOPS:  return "D_GFLOPS";
        case D_MIPS:    return "D_MIPS";
        case D_GIPS:    return "D_GIPS";
        case D_TLBM_PC: return "D_TLBM_PC";
        case D_TLBM_PS: return "D_TLBM_PS";
    }
    return "";
}
template < typename T >
void Base::PrintResults(T value, int thread_id)
{
std::vector< PapiDerivedStat > stats;
GetDerivedStats(stats);
if((thread_id >= 0)&&(thread_id <= max_threads_))
printf("Results for thread #%d\n", thread_id);
else
{
printf("Aggregated results on all threads\n");
thread_id = ALL_THREADS;
}
double t_value = static_cast<double>(value);
printf("Parameter value: %f\n", t_value);
long long results[papi_events_.size()];
GetCounters(results, thread_id);
for(int event = 0; event < papi_events_.size(); ++event)
printf("%-15s:%20d\n", papi_event_names_[event].c_str(), results[event]);
for(int d_event = 0; d_event < stats.size(); ++d_event)
printf("%-15s:%20f\n", GetDerivedStatName(stats[d_event]).c_str(), ComputeDerivedStat(stats[d_event], thread_id));
}
/**
 * Appends results to a file in the requested output format.
 * @param value     User-selected parameter echoed with the results.
 * @param file_name Path of the output file (opened in append mode).
 * @param format    Output format dispatched below (currently GNUPLOT).
 * @param thread_id Thread to report; invalid ids report the aggregate.
 */
template < typename T >
void Base::PrintResultsToFile(T value, const char * file_name, OutputFormat format, int thread_id)
{
    // '>=' (not '>'): thread_id == max_threads_ is already out of range.
    if((thread_id < 0)||(thread_id >= max_threads_))
        thread_id = ALL_THREADS;
    FILE * output = fopen(file_name, "a");
    // The old code passed a NULL stream to the printers and fclose().
    if(output == NULL)
    {
        fprintf(stderr, "[Error] Could not open output file `%s`\n", file_name);
        return;
    }
    // Add new output format methods here
    switch(format)
    {
        case GNUPLOT: {
            PrintGnuplot(value, output, thread_id);
            break;
        }
    }
    fclose(output);
}
// Format specific print methods
template < typename T >
void Base::PrintGnuplot(T value, FILE * output, int thread_id)
{
std::vector< PapiDerivedStat > stats;
GetDerivedStats(stats);
fprintf(output, "#");
if(thread_id != ALL_THREADS)
fprintf(output, "THREAD");
fprintf(output, "%15s", "VALUE");
for(int event = 0; event < papi_events_.size(); ++event)
fprintf(output, "%20s", papi_event_names_[event].c_str());
for(int d_event = 0; d_event < stats.size(); ++d_event)
fprintf(output, "%20s", GetDerivedStatName(stats[d_event]).c_str());
fprintf(output, "\n");
long long results[papi_events_.size()];
GetCounters(results, thread_id);
if(thread_id != ALL_THREADS)
fprintf(output, "%-7d", thread_id);
double t_value = static_cast<double>(value);
fprintf(output, "%16f", t_value);
for(int event = 0; event < papi_events_.size(); ++event)
fprintf(output, "%20d", results[event]);
for(int d_event = 0; d_event < stats.size(); ++d_event)
fprintf(output, "%20f", ComputeDerivedStat(stats[d_event], thread_id));
fprintf(output, "\n");
}
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y, storing the difference in
 * RESULT. Returns 1 if the difference is negative, otherwise 0.
 * Note: Y is used as scratch space and is modified by the call. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Normalize Y so that x->tv_usec >= y->tv_usec holds, borrowing
     * whole seconds into the microsecond field as needed. */
    if (x->tv_usec < y->tv_usec)
    {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* And conversely, carry surplus microseconds back into seconds. */
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* After normalization the microsecond difference is non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    /* Negative difference shows up purely in the seconds comparison. */
    return x->tv_sec < y->tv_sec;
}
/*
 * Driver: allocates the 3D grids and the 7 coefficient arrays, runs the
 * order-1 7-point variable-coefficient stencil TESTS times, reports the
 * best time, and releases all memory.
 * Usage: prog [Nx Ny Nz [Nt]]
 */
int main(int argc, char *argv[])
{
    int t, i, j, k, m, test;
    /* Default dimensions (incl. the 2 halo points) and time steps; the
     * originals were read uninitialized when arguments were missing. */
    int Nx = 66, Ny = 66, Nz = 66, Nt = 10;
    if (argc > 3) {
        Nx = atoi(argv[1])+2;
        Ny = atoi(argv[2])+2;
        Nz = atoi(argv[3])+2;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);
    // allocate the arrays: A[2][Nz][Ny][Nx] double buffers the time steps
    double ****A = (double ****) malloc(sizeof(double***)*2);
    for(m=0; m<2;m++){
        A[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            A[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    // coef[7][Nz][Ny][Nx]: one coefficient array per stencil point
    double ****coef = (double ****) malloc(sizeof(double***)*7);
    for(m=0; m<7;m++){
        coef[m] = (double ***) malloc(sizeof(double**)*Nz);
        for(i=0; i<Nz; i++){
            coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
            for(j=0;j<Ny;j++){
                coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
            }
        }
    }
    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 16;
    tile_size[1] = 16;
    tile_size[2] = 8;
    tile_size[3] = 64;
    tile_size[4] = -1;
    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;
    const int BASE = 1024;
    // initialize variables: cover the FULL grid (including the i/j/k == 0
    // halo planes, which the stencil reads via the i-1/j-1/k-1 accesses but
    // the original code left uninitialized), and both time buffers.
    srand(42);
    for (i = 0; i < Nz; i++) {
        for (j = 0; j < Ny; j++) {
            for (k = 0; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                A[1][i][j][k] = 0.0;
            }
        }
    }
    for (m=0; m<7; m++) {
        for (i=0; i<Nz; i++) {
            for (j=0; j<Ny; j++) {
                for (k=0; k<Nx; k++) {
                    coef[m][i][j][k] = 1.0 * (rand() % BASE);
                }
            }
        }
    }
#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif
    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt-1; t++) {
            for (i = 1; i < Nz-1; i++) {
                for (j = 1; j < Ny-1; j++) {
                    for (k = 1; k < Nx-1; k++) {
                        A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                              coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                              coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                              coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                              coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                              coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                              coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        // Use the MIN macro defined above; the original lowercase `min`
        // is not declared anywhere and did not compile.
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }
    PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif
    // Free allocated arrays (including the top-level pointers and the
    // tile-size list, which the original leaked)
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
    }
    free(A[0]);
    free(A[1]);
    free(A);
    for(m=0; m<7;m++){
        for(i=0; i<Nz; i++){
            for(j=0;j<Ny;j++){
                free(coef[m][i][j]);
            }
            free(coef[m][i]);
        }
        free(coef[m]);
    }
    free(coef);
    free(tile_size);
    return 0;
}
|
GB_binop__bshift_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_01__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_03__bshift_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_uint8)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_uint8)
// C=scalar+B GB (_bind1st__bshift_uint8)
// C=scalar+B' GB (_bind1st_tran__bshift_uint8)
// C=A+scalar GB (_bind2nd__bshift_uint8)
// C=A'+scalar GB (_bind2nd_tran__bshift_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_uint8 (aij, bij)
// Type- and operator-specific macro plumbing consumed by the generic
// template files #included by the kernels below (z = bitshift(uint8 x,
// int8 y), so A/C are uint8_t and B is int8_t).
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_bitshift_uint8 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_UINT8 || GxB_NO_BSHIFT_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// Disabled stub: the dense C += A+B kernel is only generated for the
// MIN/MAX/PLUS/MINUS/RMINUS/TIMES/DIV/RDIV family, which bshift is not in.
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the loop body comes from the
// included template, specialized through the GB_* macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse, sliced by B_ek_slicing.
GrB_Info GB (_Cdense_accumB__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar (of B's type, int8_t) into every entry of a
// dense matrix.
GrB_Info GB (_Cdense_accumb__bshift_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): unreachable — the brace-scoped return above always fires;
// artifact of the code generator, harmless.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled stub: the A*D column-scale kernel is not generated for bshift.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// Disabled stub: the D*B row-scale kernel is not generated for bshift.
// Fixed the generator's "(node)" placeholder typo to "(none)", matching
// every other disabled stub in this file.
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd C = A+B (optionally masked): union of the patterns of A and B,
// applying bitshift where both are present.
GrB_Info GB (_AaddB__bshift_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult C = A.*B (optionally masked): intersection of the patterns of
// A and B.
GrB_Info GB (_AemultB_01__bshift_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<#> = A.*B for A sparse/hypersparse and B bitmap/full.
// GB_BINOP_FLIP is 1 for bshift (non-commutative, no flipped twin), so the
// flipxy flag selects between fmult(x,y) and fmult(y,x) below.
GrB_Info GB (_AemultB_02__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult C<M> = A.*B for M sparse/hypersparse and A, B bitmap/full.
GrB_Info GB (_AemultB_03__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C (C=A.*B, C<M>=A.*B, or C<!M>=A.*B).
GrB_Info GB (_AemultB_bitmap__bshift_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitshift (x, Bx [p]) for every entry present in the bitmap Bb
// (Bb == NULL means all anz entries are present).
GrB_Info GB (_bind1st__bshift_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = GB_bitshift_uint8 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitshift (Ax [p], y) for every entry present in the bitmap Ab
// (Ab == NULL means all anz entries are present).
GrB_Info GB (_bind2nd__bshift_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = Ax [p] ;
Cx [p] = GB_bitshift_uint8 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// C = op (x, A'): GB_CAST_OP is redefined so the transpose template
// computes bitshift (x, aij) with the scalar bound to the first argument.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_uint8 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__bshift_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code following this function
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// C = op (A', y): GB_CAST_OP is redefined so the transpose template
// computes bitshift (aij, y) with the scalar bound to the second argument.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_uint8 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__bshift_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_fp64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_fp64_fc64
// op(A') function: GB_unop_tran__identity_fp64_fc64
// C type: double
// A type: GxB_FC64_t
// cast: double cij = (double) creal (aij)
// unaryop: cij = aij
// Macro plumbing for the identity unary op with an FC64 -> FP64 typecast:
// the "op" is a plain copy; the cast takes the real part of the complex
// input and drops the imaginary part.
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
double z = (double) creal (aij) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = (double) creal (aij) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = creal (Ax [p]) for every entry: applies the identity op with an
// FC64 -> FP64 cast (the imaginary part is discarded).
GrB_Info GB_unop_apply__identity_fp64_fc64
(
double *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// dead branch for this kernel: GB_OP_IS_IDENTITY_WITH_NO_TYPECAST is 0
// because the FC64 -> FP64 cast is not a plain copy
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
double z = (double) creal (aij) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
double z = (double) creal (aij) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_unop_tran__identity_fp64_fc64: C = op (cast (A')): transpose A,
// typecast each entry from fc64 to fp64, and apply the identity op.
// The actual work is done by the shared template GB_unop_transpose.c,
// which expands the GB_CAST_OP macro defined above for this type pair.
//
// C:           output matrix
// A:           input matrix to be transposed
// Workspaces:  workspace arrays for the transpose template
//              (layout defined by GB_unop_transpose.c — see template)
// A_slice:     partition of A's entries across tasks
// nworkspaces: number of workspaces to use
// nthreads:    number of OpenMP threads to use
//
// Returns GrB_SUCCESS, or GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_unop_tran__identity_fp64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
problem14.c | //Benjamin Ford
//Problem 14: https://projecteuler.net/problem=14
//gcc -o prob14 problem14.c -lm -fopenmp
#include <stdio.h>
#include <omp.h>
#define limit 10000000
int getSeq(int);
//Searches 1..limit for the start value with the longest Collatz chain,
//in parallel, and prints the winner plus the elapsed wall time.
//Fixes vs. the original: removed the unused tid/omp_get_thread_num()
//dead code, and replaced the per-iteration critical section (which
//serialized every loop iteration) with a per-thread running best that
//is merged into the shared result exactly once per thread.
int main(int argc, char *argv[]){
    int maxValue = 0;   //longest chain length found (shared result)
    int longest = 0;    //start value that produced maxValue (shared result)
    int i = 0;
    double time0, time1, timeTaken;
    time0 = omp_get_wtime();
    omp_set_num_threads( 8 );
    #pragma omp parallel default(none), shared(maxValue, longest), private(i)
    {
        //variables declared inside the parallel region are private
        int localMax = 0;
        int localLongest = 0;
        #pragma omp for schedule(static)
        for(i = limit; i > 0; i--)
        {
            int tempVal = getSeq(i);
            if(tempVal > localMax){
                localMax = tempVal;
                localLongest = i;
            }
        }
        //merge this thread's best into the shared answer: one critical
        //section per thread instead of one per iteration
        #pragma omp critical
        {
            if(localMax > maxValue){
                maxValue = localMax;
                longest = localLongest;
            }
        }
    }
    time1 = omp_get_wtime();
    timeTaken = time1 - time0;
    printf("Longest: %d Time Taken: %8.8lf\n",longest, timeTaken);
    return 0;
}
//Returns the length of the Collatz sequence starting at num, counting
//both num itself and the terminating 1 (getSeq(1)==1, getSeq(2)==2).
//Returns 0 for num < 1, where the sequence is undefined.
int getSeq(int num){
    int ctr = 1;
    if(num < 1){
        return 0;
    }
    //64-bit intermediate: Collatz trajectories for start values below
    //10^7 can exceed 2^32, and plain long is only 32 bits on LLP64
    //platforms (e.g. Windows), which would silently overflow in 3*val+1
    unsigned long long val = num;
    while(val != 1){
        ctr++;
        if(val % 2 == 0){
            val = val/2;
        }
        else{
            val = 3*val + 1;
        }
    }
    return ctr;
}
move_particle_utility.h | // KRATOS ___ ___ _ ___ __ ___ ___ ___ ___
// / __/ _ \| \| \ \ / /__| \_ _| __| __|
// | (_| (_) | .` |\ V /___| |) | || _|| _|
// \___\___/|_|\_| \_/ |___/___|_| |_| APPLICATION
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Pablo Becker
//
#if !defined(KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED)
#define KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/node.h"
///
#include "includes/dof.h"
#include "includes/variables.h"
#include "containers/array_1d.h"
#include "containers/data_value_container.h"
#include "includes/mesh.h"
#include "utilities/math_utils.h"
///
#include "utilities/geometry_utilities.h"
#include "includes/model_part.h"
#include "spatial_containers/spatial_containers.h"
#include "spatial_containers/cell.h"
#include "spatial_containers/bins_dynamic_objects.h"
#include "utilities/spatial_containers_configure.h"
#include "geometries/line_2d_2.h"
#include "geometries/triangle_2d_3.h"
#include "geometries/triangle_3d_3.h"
#include "geometries/point.h"
#include "convection_diffusion_application.h"
#include "convection_particle.h"
#include "utilities/openmp_utils.h"
#include "time.h"
//#include "processes/process.h"
namespace Kratos
{
//this class is to be modified by the user to customize the interpolation process
template< unsigned int TDim>
class MoveParticleUtilityScalarTransport
{
public:
typedef SpatialContainersConfigure<TDim> Configure;
typedef typename Configure::PointType PointType;
//typedef PointType::CoordinatesArrayType CoordinatesArrayType;
typedef typename Configure::ContainerType ContainerType;
//typedef Configure::PointerType PointerType;
typedef typename Configure::IteratorType IteratorType;
typedef typename Configure::ResultContainerType ResultContainerType;
//typedef Configure::ResultPointerType ResultPointerType;
typedef typename Configure::ResultIteratorType ResultIteratorType;
typedef PointerVector< Convection_Particle, Convection_Particle*, std::vector<Convection_Particle*> > ParticlePointerVector;
//typedef Configure::ContactPairType ContactPairType;
//typedef Configure::ContainerContactType ContainerContactType;
//typedef Configure::IteratorContactType IteratorContactType;
//typedef Configure::PointerContactType PointerContactType;
//typedef Configure::PointerTypeIterator PointerTypeIterator;
KRATOS_CLASS_POINTER_DEFINITION(MoveParticleUtilityScalarTransport);
//template<unsigned int TDim>
// Constructor. Caches the convected variables from the model part's
// CONVECTION_DIFFUSION_SETTINGS, renumbers the elements 1..n so element
// Id can be used as a direct array index, computes a nodal MEAN_SIZE
// (mean edge length to neighbours) and an elemental MEAN_SIZE (shortest
// edge), and seeds 5*(TDim+1) particles per element, initializing each
// particle's scalar by shape-function interpolation of the unknown.
//
// model_part:                  mesh and nodal/elemental data to work on
// maximum_number_of_particles: hard cap of particles kept per element
MoveParticleUtilityScalarTransport(ModelPart& model_part, int maximum_number_of_particles)
: mr_model_part(model_part) , mmaximum_number_of_particles(maximum_number_of_particles) ,
mUnknownVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetUnknownVariable()) ,
mProjectionVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetProjectionVariable()) ,
mVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetVelocityVariable()) ,
mMeshVelocityVar((model_part.GetProcessInfo()[CONVECTION_DIFFUSION_SETTINGS])->GetMeshVelocityVariable())
{
std::cout << "initializing moveparticle utility for scalar transport" << std::endl;
Check();
//storing water and air density and their inverses, just in case it is needed for the streamline integration
//loop in elements to change their ID to their position in the array. Easier to get information later.
//DO NOT PARALELIZE THIS! IT MUST BE SERIAL!!!!!!!!!!!!!!!!!!!!!!
//(SetId while iterating depends on the serial order of the container)
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
ielem->SetId(ii+1);
}
mlast_elem_id= (mr_model_part.ElementsEnd()-1)->Id();
int node_id=0;
// we look for the smallest edge. could be used as a weighting function when going lagrangian->eulerian instead of traditional shape functions(method currently used)
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator pnode = inodebegin+ii;
array_1d<double,3> position_node;
double distance=0.0;
position_node = pnode->Coordinates();
GlobalPointersVector< Node<3> >& rneigh = pnode->GetValue(NEIGHBOUR_NODES);
//we loop all the nodes to check all the edges
const double number_of_neighbours = double(rneigh.size());
for( GlobalPointersVector<Node<3> >::iterator inode = rneigh.begin(); inode!=rneigh.end(); inode++)
{
array_1d<double,3> position_difference;
position_difference = inode->Coordinates() - position_node;
double current_distance= sqrt(pow(position_difference[0],2)+pow(position_difference[1],2)+pow(position_difference[2],2));
//if (current_distance>distance)
// distance=current_distance;
distance += current_distance / number_of_neighbours;
}
//and we save it: note this is the MEAN edge length to the
//neighbours (the max-edge variant above is commented out)
pnode->FastGetSolutionStepValue(MEAN_SIZE)=distance;
node_id=pnode->GetId();
}
}
mlast_node_id=node_id;
//we also calculate the element mean size in the same way, for the courant number
//also we set the right size to the LHS column for the pressure enrichments, in order to recover correctly the enrichment pressure
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
double mElemSize;
array_1d<double,3> Edge(3,0.0);
//start from edge 0-1, then scan every remaining node pair and
//keep the SHORTEST squared edge length
Edge = ielem->GetGeometry()[1].Coordinates() - ielem->GetGeometry()[0].Coordinates();
mElemSize = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
mElemSize += Edge[d]*Edge[d];
for (unsigned int i = 2; i < (TDim+1); i++)
for(unsigned int j = 0; j < i; j++)
{
Edge = ielem->GetGeometry()[i].Coordinates() - ielem->GetGeometry()[j].Coordinates();
double Length = Edge[0]*Edge[0];
for (unsigned int d = 1; d < TDim; d++)
Length += Edge[d]*Edge[d];
if (Length < mElemSize) mElemSize = Length;
}
mElemSize = sqrt(mElemSize);
ielem->GetValue(MEAN_SIZE) = mElemSize;
}
}
//matrix containing the position of the 4/15/45 particles that we will seed at the beggining
BoundedMatrix<double, 5*(1+TDim), 3 > pos;
BoundedMatrix<double, 5*(1+TDim), (1+TDim) > N;
int particle_id=0;
mnelems = mr_model_part.Elements().size();
std::cout << "about to resize vectors" << std::endl;
//setting the right size to the vector containing the particles assigned to each element
//particles vector. this vector contains ALL the particles in the simulation.
mparticles_vector.resize(mnelems*mmaximum_number_of_particles);
//and this vector contains the current number of particles that are in each element (currently zero)
mnumber_of_particles_in_elems.resize(mnelems);
mnumber_of_particles_in_elems=ZeroVector(mnelems);
//when moving the particles, an auxiliary vector is necessary (to store the previous number)
mnumber_of_particles_in_elems_aux.resize(mnelems);
//each element will have a list of pointers to all the particles that are inside.
//this vector contains the pointers to the vector of (particle) pointers of each element.
mvector_of_particle_pointers_vectors.resize(mnelems);
//int artz;
//std::cin >> artz;
int i_int=0; //careful! it's not the id, but the position inside the array!
std::cout << "about to create particles" << std::endl;
//now we seed: LOOP IN ELEMENTS
//using loop index, DO NOT paralelize this! change lines : mparticles_in_elems_pointers((ii*mmaximum_number_of_particles)+mparticles_in_elems_integers(ii)) = pparticle; and the next one
moffset=0;
//Convection_Particle& firstparticle =mparticles_vector[0];
for(unsigned int ii=0; ii<mr_model_part.Elements().size(); ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//(ielem->GetValue(BED_PARTICLE_POINTERS)) = ParticlePointerVector( mmaximum_number_of_particles*2, &firstparticle );
//ParticlePointerVector& particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
//now we link the mpointers_to_particle_pointers_vectors to the corresponding element
//mpointers_to_particle_pointers_vectors(ii) = &particle_pointers;
//now we resize the vector of particle pointers. it is double sized because we move the particles from an initial position (first half) to a final position (second half).
//for(int j=0; j<(mmaximum_number_of_particles*2); j++)
// particle_pointers.push_back(&firstparticle);
mvector_of_particle_pointers_vectors[ii] = ParticlePointerVector( mmaximum_number_of_particles*2 );
ParticlePointerVector& particle_pointers = mvector_of_particle_pointers_vectors[ii];
//int & number_of_particles = ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles = mnumber_of_particles_in_elems[ii];
number_of_particles=0;
Geometry< Node<3> >& geom = ielem->GetGeometry();
//unsigned int elem_id = ielem->Id();
//mareas_vector[i_int]=CalculateArea(geom); UNUSED SO COMMENTED
ComputeGaussPointPositions_initial(geom, pos, N); //we also have the standard (4), and 45
//now we seed the particles in the current element
for (unsigned int j = 0; j < pos.size1(); j++)
{
++particle_id;
Convection_Particle& pparticle = mparticles_vector[particle_id-1];
pparticle.X()=pos(j,0);
pparticle.Y()=pos(j,1);
pparticle.Z()=pos(j,2);
pparticle.GetEraseFlag()=false;
//interpolate the transported scalar at the seeding position
//using the shape functions N returned for this gauss point
float & scalar1= pparticle.GetScalar1();
scalar1=0.0;
for (unsigned int k = 0; k < (TDim+1); k++)
{
scalar1 += N(j, k) * geom[k].FastGetSolutionStepValue(mUnknownVar);
}
particle_pointers(j) = &pparticle;
number_of_particles++ ;
}
++i_int;
}
m_nparticles=particle_id; //we save the last particle created as the total number of particles we have. For the moment this is true.
KRATOS_WATCH(m_nparticles);
//KRATOS_WATCH(mlast_elem_id);
mparticle_printing_tool_initialized=false;
//std::cin >> artz;
}
// Destructor: nothing to release explicitly; all members own their storage.
virtual ~MoveParticleUtilityScalarTransport()
{}
// Builds the dynamic spatial bins over the model part's elements and
// stores it in mpBinsObjectDynamic. The bins are used later to locate
// which element contains a given spatial point (particle search).
// Must be called before MoveParticles() whenever the mesh changes.
void MountBin()
{
KRATOS_TRY
//copy the elements to a new container, as the list will
//be shuffled during the construction of the tree
ContainerType& rElements = mr_model_part.ElementsArray();
IteratorType it_begin = rElements.begin();
IteratorType it_end = rElements.end();
//const int number_of_elem = rElements.size();
//swap the freshly built bins into the member pointer (replaces any
//previous structure)
typename BinsObjectDynamic<Configure>::Pointer paux = typename BinsObjectDynamic<Configure>::Pointer(new BinsObjectDynamic<Configure>(it_begin, it_end ) );
paux.swap(mpBinsObjectDynamic);
//BinsObjectDynamic<Configure> mpBinsObjectDynamic(it_begin, it_end );
std::cout << "finished mounting Bins" << std::endl;
KRATOS_CATCH("")
}
// For every element, stores MEAN_VEL_OVER_ELEM_SIZE = |mean nodal
// velocity| / MEAN_SIZE (shortest edge, computed in the constructor).
// NOTE(review): presumably consumed by the substepping/Courant logic in
// MoveParticle — the consumer is outside this view; confirm there.
void CalculateVelOverElemSize()
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
//1/(TDim+1): equal nodal weights for the element-mean velocity
const double nodal_weight = 1.0/ (1.0 + double (TDim) );
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
Geometry<Node<3> >& geom = ielem->GetGeometry();
array_1d<double, 3 >vector_mean_velocity=ZeroVector(3);
for (unsigned int i=0; i != (TDim+1) ; i++)
vector_mean_velocity += geom[i].FastGetSolutionStepValue(mVelocityVar);
vector_mean_velocity *= nodal_weight;
const double mean_velocity = sqrt ( pow(vector_mean_velocity[0],2) + pow(vector_mean_velocity[1],2) + pow(vector_mean_velocity[2],2) );
ielem->GetValue(MEAN_VEL_OVER_ELEM_SIZE) = mean_velocity / (ielem->GetValue(MEAN_SIZE));
}
}
KRATOS_CATCH("")
}
// On every node where the unknown is FIXED (a Dirichlet BC), overwrites
// the current-step value with the value from the previous time step
// (buffer position 1), i.e. re-imposes the boundary condition after the
// particle projection may have polluted it.
void ResetBoundaryConditions()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
if (inode->IsFixed(mUnknownVar))
{
inode->FastGetSolutionStepValue(mUnknownVar)=inode->GetSolutionStepValue(mUnknownVar,1);
}
}
}
KRATOS_CATCH("")
}
// Stores, on every node, DELTA_SCALAR1 = (unknown) - (projected value),
// i.e. the correction the Eulerian solve applied on top of the particle
// projection. This delta is what gets transferred back to the particles.
void CalculateDeltaVariables()
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(DELTA_SCALAR1) = inode->FastGetSolutionStepValue(mUnknownVar) - inode->FastGetSolutionStepValue(mProjectionVar) ;
}
}
KRATOS_CATCH("")
}
// Copies the current-step value of OriginVariable into the previous
// time step slot (buffer position 1) for every node in rNodes, so the
// "old" value seen by the next step matches the value just computed.
//
// OriginVariable: nodal scalar variable to copy
// rNodes:         set of nodes to process
void CopyScalarVarToPreviousTimeStep(const Variable<double>& OriginVariable,
ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
ModelPart::NodesContainerType::iterator inodebegin = rNodes.begin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, rNodes.size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->GetSolutionStepValue(OriginVariable,1) = inode->FastGetSolutionStepValue(OriginVariable);
}
}
KRATOS_CATCH("")
}
//to move all the particles across the streamlines. heavy task!
//Convects every live particle along the velocity field for one time
//step (delegating the actual integration to MoveParticle) and re-files
//each particle under the element it lands in. The per-element pointer
//arrays are double-sized: "offset" selects the half holding the current
//positions, "post_offset" the half that receives the new ones; moffset
//is flipped at the end so the halves alternate every call.
void MoveParticles()
{
KRATOS_TRY
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//moveparticlesdiff reads from the pointers of one part (ie odd) and saves into the other part (ie even part)
//since it is the only function in the whole procedure that does this, it must use alternatively one part and the other.
//KRATOS_WATCH(offset)
bool even_timestep;
if (offset!=0) even_timestep=false;
else even_timestep=true;
const int post_offset = mmaximum_number_of_particles*int(even_timestep); //and we also save the offset to know the location in which we will save the pointers after we've moved the particles
//KRATOS_WATCH(post_offset)
double delta_t = CurrentProcessInfo[DELTA_TIME];
array_1d<double,TDim+1> N;
const unsigned int max_results = 10000;
//double integration_distance= 2.0;
//substepping parameters consumed by MoveParticle (members)
max_nsubsteps = 10;
max_substep_dt=delta_t/double(max_nsubsteps);
vector<unsigned int> element_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
//before doing anything we must reset the vector of nodes contained by each element (particles that are inside each element.
//save the previous counts in the aux vector and zero the live counts,
//which will be rebuilt as particles are re-filed below
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
//ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
int & number_of_particles = mnumber_of_particles_in_elems[ii]; //old_element->GetValue(NUMBER_OF_BED_PARTICLES);
mnumber_of_particles_in_elems_aux[ii]=number_of_particles;
mnumber_of_particles_in_elems[ii]=0;
//we reset the local vectors for a faster access;
}
}
std::cout << "convecting particles" << std::endl;
//We move the particles across the fixed mesh and saving change data into them (using the function MoveParticle)
#pragma omp barrier
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
//per-thread scratch: search results buffer and the chain of elements
//crossed by the previous particle (reused as a warm start)
ResultContainerType results(max_results);
GlobalPointersVector< Element > elements_in_trajectory;
elements_in_trajectory.resize(20);
for(unsigned int ielem=element_partition[kkk]; ielem<element_partition[kkk+1]; ielem++)
{
//for(unsigned int ielem=0; ielem<mr_model_part.Elements().size(); ielem++)
//{
ModelPart::ElementsContainerType::iterator old_element = ielembegin+ielem;
const int old_element_id = old_element->Id();
ParticlePointerVector& old_element_particle_pointers = mvector_of_particle_pointers_vectors(old_element_id-1);
if ( (results.size()) !=max_results)
results.resize(max_results);
unsigned int number_of_elements_in_trajectory=0; //excluding the origin one (current one, ielem)
for(int ii=0; ii<(mnumber_of_particles_in_elems_aux(ielem)); ii++)
{
Convection_Particle & pparticle = old_element_particle_pointers[offset+ii];
Element::Pointer pcurrent_element( *old_element.base() );
ResultIteratorType result_begin = results.begin();
bool & erase_flag=pparticle.GetEraseFlag();
if (erase_flag==false){
MoveParticle(pparticle,pcurrent_element,elements_in_trajectory,number_of_elements_in_trajectory,result_begin,max_results); //saque N de los argumentos, no lo necesito ya q empieza SIEMPRE en un nodo y no me importa donde termina
const int current_element_id = pcurrent_element->Id();
int & number_of_particles_in_current_elem = mnumber_of_particles_in_elems(current_element_id-1);
//int & number_of_water_particles_in_current_elem = mnumber_of_water_particles_in_elems(current_element_id-1);
//cheap unsynchronized pre-check; the authoritative check is
//repeated inside the critical section below
if (number_of_particles_in_current_elem<mmaximum_number_of_particles && erase_flag==false)
{
{
ParticlePointerVector& current_element_particle_pointers = mvector_of_particle_pointers_vectors(current_element_id-1);
#pragma omp critical
{
if (number_of_particles_in_current_elem<mmaximum_number_of_particles) // we cant go over this node, there's no room. otherwise we would be in the position of the first particle of the next element!!
{
current_element_particle_pointers(post_offset+number_of_particles_in_current_elem) = &pparticle;
number_of_particles_in_current_elem++ ;
if (number_of_particles_in_current_elem>mmaximum_number_of_particles)
KRATOS_WATCH("MAL");
}
else
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
else
pparticle.GetEraseFlag()=true; //so we just delete it!
}
}
}
}
/*
//now we pass info from the local vector to the elements:
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator old_element = ielembegin+ii;
old_element->GetValue(NUMBER_OF_BED_PARTICLES) = mnumber_of_particles_in_elems(ii);
//old_element->GetValue(NUMBER_OF_WATER_PARTICLES) = mnumber_of_water_particles_in_elems(ii);
}
}
*/
//after having changed everything we change the status of the modd_timestep flag:
moffset = post_offset;; //
KRATOS_CATCH("")
}
// Explicit Lagrangian->Eulerian projection: accumulates each particle's
// scalar onto the nodes of its host element with weights N(j)^2, sums
// the weights into YP, then normalizes mProjectionVar by YP per node.
// Nodes that received (almost) no particle weight fall back to the
// previous time step's unknown.
void TransferLagrangianToEulerian() //explicit
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
//const double delta_t =CurrentProcessInfo[DELTA_TIME];
//threshold is 0.0 here, so the weight floor below never triggers
const double threshold= 0.0/(double(TDim)+1.0);
std::cout << "projecting info to mesh" << std::endl;
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset) //(flag managed only by MoveParticles
//we must project data from the particles (lagrangian) into the eulerian mesh
//ValuesVectorType eulerian_nodes_old_temperature;
//int nnodes = mr_model_part.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
//we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
//though we could've use a bigger buffer, to be changed later!
//after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
//pass 1: zero the nodal accumulators (projection value and weight sum)
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(mProjectionVar)=0.0;
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
//adding contribution, loop on elements, since each element has stored the particles found inside of it
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
//pass 2: accumulate weighted particle scalars element by element
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));
//array_1d<double,(TDim+1)> weighting_inverse_divisor;
Geometry<Node<3> >& geom = ielem->GetGeometry();
for (int i=0 ; i!=(TDim+1) ; ++i)
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
//weighting_inverse_divisor[i]=1.0/((geom[i].FastGetSolutionStepValue(MEAN_SIZE))*1.01);
}
///KRATOS_WATCH(ielem->Id())
///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());
//int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
break;
Convection_Particle & pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air
array_1d<double,TDim+1> N;
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_WATCH(N);
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
//double sq_dist = 0;
//these lines for a weighting function based on the distance (or square distance) from the node insteadof the shape functions
//for (int k=0 ; k!=(TDim); k++) sq_dist += ((position[k] - nodes_positions[j*3+k])*(position[k] - nodes_positions[j*3+k]));
//double weight = (1.0 - (sqrt(sq_dist)*weighting_inverse_divisor[j] ) );
//squared shape function: biases the projection towards the
//closest node
double weight=N(j)*N(j);
//weight=N(j)*N(j)*N(j);
if (weight<threshold) weight=1e-10;
if (weight<0.0) {KRATOS_WATCH(weight)}//;weight=0.0;KRATOS_WATCH(velocity);KRATOS_WATCH(N);KRATOS_WATCH(number_of_particles_in_elem);}//{KRATOS_WATCH(weight); KRATOS_WATCH(geom[j].Id()); KRATOS_WATCH(position);}
else
{
nodes_addedweights[j]+= weight;
//nodes_addedtemp[j] += weight * particle_temp;
nodes_added_scalar1[j] += weight*particle_scalar1;
}//
}
}
}
//nodal SetLock/UnSetLock: several elements share nodes, so the
//accumulation into each node must be serialized
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
geom[i].UnSetLock();
}
}
}
//pass 3: normalize by the accumulated weights (or fall back to the
//previous step's value where no particle contributed)
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
//inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT)=(inode->FastGetSolutionStepValue(TEMPERATURE_OLD_IT))/sum_weights; //resetting the temperature
double & height = inode->FastGetSolutionStepValue(mProjectionVar);
height /=sum_weights; //resetting the density
}
else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1); //resetting the temperature
}
}
}
KRATOS_CATCH("")
}
void TransferLagrangianToEulerianImp() //semi implicit
{
KRATOS_TRY
// ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
std::cout << "projecting info to mesh (semi implicit)" << std::endl;
const int offset = moffset; //the array of pointers for each element has twice the required size so that we use a part in odd timesteps and the other in even ones.
//KRATOS_WATCH(offset) //(flag managed only by MoveParticles
//we must project data from the particles (lagrangian) into the eulerian mesh
//ValuesVectorType eulerian_nodes_old_temperature;
//int nnodes = mr_model_part.Nodes().size();
//array_1d<double,(n_nodes)> eulerian_nodes_sumweights;
//we save data from previous time step of the eulerian mesh in case we must reuse it later cos no particle was found around the nodes
//though we could've use a bigger buffer, to be changed later!
//after having saved data, we reset them to zero, this way it's easier to add the contribution of the surrounding particles.
ModelPart::NodesContainerType::iterator inodebegin = mr_model_part.NodesBegin();
vector<unsigned int> node_partition;
#ifdef _OPENMP
int number_of_threads = omp_get_max_threads();
#else
int number_of_threads = 1;
#endif
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Nodes().size(), node_partition);
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
inode->FastGetSolutionStepValue(mProjectionVar)=0.0;
inode->FastGetSolutionStepValue(YP)=0.0;
}
}
//adding contribution, loop on elements, since each element has stored the particles found inside of it
vector<unsigned int> element_partition;
OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
//creating a matrix for each of the problems.
BoundedMatrix<double, TDim+1 , TDim+1 > mass_matrix; // WE ONLY NEED ONE! they are the same for all the variables! //_x,mass_matrix_y,mass_matrix_z,mass_matrix_d; //mass matrices for the projected vel (x,y,z) and the distance
array_1d<double,(TDim+1)> rhs_scalar1;
array_1d<double,3*(TDim+1)> nodes_positions;
array_1d<double,(TDim+1)> nodes_added_scalar1 = ZeroVector((TDim+1));
array_1d<double,(TDim+1)> nodes_addedweights = ZeroVector((TDim+1));
for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
{
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
nodes_added_scalar1 = ZeroVector((TDim+1)); //resetting vectors
nodes_addedweights = ZeroVector((TDim+1)); //resetting vectors
mass_matrix = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices. WE ONLY NEED ONE! they are the same for all the variable. only the rhs changes.
//mass_matrix_y = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
//mass_matrix_z = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
//mass_matrix_d = ZeroMatrix(TDim+1 , TDim+1 ); //resetting matrices
rhs_scalar1 = ZeroVector((TDim+1)); //resetting vectors
Geometry<Node<3> >& geom = ielem->GetGeometry();
const double elem_volume = geom.Area();
for (int i=0 ; i!=(TDim+1) ; ++i) //saving the nodal positions for faster access
{
nodes_positions[i*3+0]=geom[i].X();
nodes_positions[i*3+1]=geom[i].Y();
nodes_positions[i*3+2]=geom[i].Z();
}
///KRATOS_WATCH(ielem->Id())
///KRATOS_WATCH(ielem->GetValue(NEIGHBOUR_NODES).size());
//int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
//ParticlePointerVector& element_particle_pointers = (ielem->GetValue(BED_PARTICLE_POINTERS));
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];
for (int iii=0; iii<number_of_particles_in_elem ; iii++ )
{
if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
break;
Convection_Particle & pparticle = element_particle_pointers[offset+iii];
if (pparticle.GetEraseFlag()==false)
{
array_1d<double,3> & position = pparticle.Coordinates();
const float& particle_scalar1 = pparticle.GetScalar1(); // -1 if water, +1 if air
array_1d<double,TDim+1> N;
bool is_found = CalculatePosition(nodes_positions,position[0],position[1],position[2],N);
if (is_found==false) //something went wrong. if it was close enough to the edge we simply send it inside the element.
{
KRATOS_WATCH(N);
for (int j=0 ; j!=(TDim+1); j++)
if (N[j]<0.0 && N[j]> -1e-5)
N[j]=1e-10;
}
for (int j=0 ; j!=(TDim+1); j++) //going through the 3/4 nodes of the element
{
double weight=N(j);
for (int k=0 ; k!=(TDim+1); k++) //building the mass matrix
mass_matrix(j,k) += weight*N(k);
rhs_scalar1[j] += weight * double(particle_scalar1);
//adding also a part with the lumped mass matrix to reduce overshoots and undershoots
if(true)
{
double this_particle_weight = weight*elem_volume/(double(number_of_particles_in_elem))*0.1; //can be increased or reduced to change the lumped mass contrubtion
nodes_addedweights[j]+= this_particle_weight;
nodes_added_scalar1[j] += this_particle_weight*particle_scalar1;
}
}
}
}
//now we invert the matrix
BoundedMatrix<double, TDim+1 , TDim+1 > inverse_mass_matrix=ZeroMatrix(TDim+1 , TDim+1);
if(TDim==3)
InvertMatrix( mass_matrix, inverse_mass_matrix);
else
InvertMatrix3x3( mass_matrix, inverse_mass_matrix);
//and now compute the elemental contribution to the gobal system:
if(number_of_particles_in_elem > static_cast<int>(TDim)*3) //otherwise it's impossible to define a correctly the gradients, therefore the results inside the element are useless.
{
for (int i=0 ; i!=(TDim+1); i++)
{
for (int j=0 ; j!=(TDim+1); j++)
{
nodes_added_scalar1[i] += inverse_mass_matrix(i,j)*rhs_scalar1[j]*elem_volume*(1.0/(double(1+TDim)));
}
}
//and also to the mass matrix. LUMPED (but for the contribution of the grandient at elemental level.
for (int i=0 ; i!=(TDim+1); i++)
nodes_addedweights[i] += elem_volume*(1.0/(double(1+TDim)));
}
for (int i=0 ; i!=(TDim+1) ; ++i) {
geom[i].SetLock();
geom[i].FastGetSolutionStepValue(mProjectionVar) +=nodes_added_scalar1[i];
geom[i].FastGetSolutionStepValue(YP) +=nodes_addedweights[i];
geom[i].UnSetLock();
}
}
}
#pragma omp parallel for
for(int kkk=0; kkk<number_of_threads; kkk++)
{
for(unsigned int ii=node_partition[kkk]; ii<node_partition[kkk+1]; ii++)
{
ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
double sum_weights = inode->FastGetSolutionStepValue(YP);
if (sum_weights>0.00001)
{
double & scalar1 = inode->FastGetSolutionStepValue(mProjectionVar);
scalar1 /=sum_weights; //resetting the density
}
else //this should never happen because other ways to recover the information have been executed before, but leaving it just in case..
{
inode->FastGetSolutionStepValue(mProjectionVar)=inode->FastGetSolutionStepValue(mUnknownVar,1);
}
}
}
KRATOS_CATCH("")
}
///Applies the nodal DELTA_SCALAR1 correction to every active particle in
///place: the particles are NOT moved, only their transported scalar is
///updated by interpolating the nodal delta at the particle position.
///Loops over all elements in parallel (one chunk per thread).
void CorrectParticlesWithoutMovingUsingDeltaVariables()
{
    KRATOS_TRY

    // The array of pointers of each element has twice the required size so
    // that we use one half in odd timesteps and the other in even ones
    // (flag managed only by MoveParticles).
    const int offset = moffset;

    ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();

    vector<unsigned int> element_partition;
    #ifdef _OPENMP
    int number_of_threads = omp_get_max_threads();
    #else
    int number_of_threads = 1;
    #endif
    OpenMPUtils::CreatePartition(number_of_threads, mr_model_part.Elements().size(), element_partition);

    #pragma omp parallel for
    for(int kkk=0; kkk<number_of_threads; kkk++)
    {
        for(unsigned int ii=element_partition[kkk]; ii<element_partition[kkk+1]; ii++)
        {
            ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
            Element::Pointer pelement(*ielem.base());
            Geometry<Node<3> >& geom = ielem->GetGeometry();

            int & number_of_particles_in_elem = mnumber_of_particles_in_elems[ii];
            ParticlePointerVector& element_particle_pointers = mvector_of_particle_pointers_vectors[ii];

            for (int iii=0; iii<number_of_particles_in_elem; iii++)
            {
                // BUGFIX: this check used to be (iii>mmaximum_number_of_particles),
                // which let iii==mmaximum_number_of_particles through and read one
                // slot past our half of the pointer array. Use == like the sibling
                // loops elsewhere in this file.
                if (iii==mmaximum_number_of_particles) //it means we are out of our portion of the array, abort loop!
                    break;

                Convection_Particle & pparticle = element_particle_pointers[offset+iii];

                bool erase_flag = pparticle.GetEraseFlag();
                if (erase_flag==false)
                {
                    CorrectParticleUsingDeltaVariables(pparticle,pelement,geom); //'lite' version, we pass by reference the geometry, so much cheaper
                }
            }
        }
    }
    KRATOS_CATCH("")
}
//**************************************************************************************************************
//**************************************************************************************************************
/// Appends "candidate" to the global-pointer vector v unless an entry with
/// the same Id() is already present (linear scan over the stored Ids).
template< class TDataType > void AddUniqueWeakPointer
    (GlobalPointersVector< TDataType >& v, const typename TDataType::WeakPointer candidate)
{
    // Scan the existing entries; stop as soon as a matching Id is found.
    for (typename GlobalPointersVector< TDataType >::iterator it = v.begin(); it != v.end(); ++it)
    {
        if ((it)->Id() == (candidate)->Id())
            return; // already stored, nothing to do
    }
    // No stored entry shares the candidate's Id: keep it.
    v.push_back(candidate);
}
//**************************************************************************************************************
//**************************************************************************************************************
///Reseeds elements whose particle count fell below
///"minimum_number_of_particles" BEFORE the convection step. New particles
///are created at element Gauss points (ComputeGaussPointPositionsForPreReseed)
///and convected BACKWARDS along the velocity field (MoveParticle_inverse_way)
///so they pick up the scalar value they should carry. Storage is recycled:
///each new particle claims the first slot of mparticles_vector whose erase
///flag is set.
void PreReseed(int minimum_number_of_particles)
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
//"offset" selects the active half of the per-element pointer arrays (even/odd timestep, managed by MoveParticles).
const int offset =moffset;
const int max_results = 1000;
//tools for the paralelization
unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
vector<unsigned int> elem_partition;
int number_of_rows=mr_model_part.Elements().size();
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
//KRATOS_WATCH(elem_partition_size);
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition)
{
//per-thread scratch container for the bin searches performed while convecting backwards.
ResultContainerType results(max_results);
int k = OpenMPUtils::ThisThread();
//ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() +  elem_partition[k];
//ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
//ModelPart::NodesContainerType  local_list=aux[k];
//PointerVectorSet<Convection_Particle, IndexedObject> & list=aux[k];
//KRATOS_WATCH(k);
BoundedMatrix<double, (TDim+1), 3 > pos;
BoundedMatrix<double, (TDim+1) , (TDim+1) > N;
unsigned int freeparticle=0; //we start with the first position in the particles array
//int local_id=1;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
results.resize(max_results);
//const int & elem_id = ielem->Id();
//ParticlePointerVector&  element_particle_pointers =  (ielem->GetValue(BED_PARTICLE_POINTERS));
//int & number_of_particles_in_elem=ielem->GetValue(NUMBER_OF_BED_PARTICLES);
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers =  mvector_of_particle_pointers_vectors[ii];
if (number_of_particles_in_elem<(minimum_number_of_particles))// && (ielem->GetGeometry())[0].Y()<0.10 )
{
//KRATOS_WATCH("elem with little particles")
Geometry< Node<3> >& geom = ielem->GetGeometry();
ComputeGaussPointPositionsForPreReseed(geom, pos, N);
//double conductivity = ielem->GetProperties()[CONDUCTIVITY];
//KRATOS_WATCH(conductivity);
for (unsigned int j = 0; j < (pos.size1()); j++) //i am dropping the last one, the one in the middle of the element
{
//find a free (erased) slot in the global particle vector. The erase flag is
//re-checked inside "omp critical" so two threads cannot claim the same slot;
//a thread that loses the race simply advances to the next candidate.
//NOTE(review): "freeparticle" is not bounds-checked against the vector size
//here - this assumes enough erased slots always exist. TODO confirm.
bool keep_looking = true;
while(keep_looking)
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
mparticles_vector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
{
freeparticle++;
}
}
//create the new particle at the Gauss point position.
Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2));
array_1d<double,TDim+1>aux2_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux2_N);
if (is_found==false)
{
KRATOS_WATCH(aux2_N);
}
pparticle.GetEraseFlag()=false;
ResultIteratorType result_begin = results.begin();
Element::Pointer pelement( *ielem.base() );
//convect the particle backwards to sample the scalar it should carry.
MoveParticle_inverse_way(pparticle, pelement, result_begin, max_results);
//and we copy it to the array:
mparticles_vector[freeparticle] = pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
pparticle.GetEraseFlag()=false;
number_of_particles_in_elem++;
}
}
}
}
KRATOS_CATCH("")
}
//**************************************************************************************************************
//**************************************************************************************************************
///Reseeds elements whose particle count fell below
///"minimum_number_of_particles" AFTER the convection step. Unlike PreReseed,
///the new particles are NOT convected: 3+2*TDim particles are placed at
///predefined positions (ComputeGaussPointPositionsForPostReseed) and each is
///given the mesh scalar (mUnknownVar) interpolated at its position. Storage
///is recycled from erased slots of the global particle vector.
void PostReseed(int minimum_number_of_particles) //pooyan's way
{
KRATOS_TRY
//ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
//"offset" selects the active half of the per-element pointer arrays (even/odd timestep).
const int offset = moffset;
//TOOLS FOR THE PARALELIZATION
//int last_id= (mr_linea_model_part.NodesEnd()-1)->Id();
unsigned int number_of_threads = OpenMPUtils::GetNumThreads();
//KRATOS_WATCH(number_of_threads);
vector<unsigned int> elem_partition;
int number_of_rows=mr_model_part.Elements().size();
//KRATOS_WATCH(number_of_threads);
//KRATOS_THROW_ERROR(std::logic_error, "Add  ----NODAL_H---- variable!!!!!! ERROR", "");
elem_partition.resize(number_of_threads + 1);
int elem_partition_size = number_of_rows / number_of_threads;
elem_partition[0] = 0;
elem_partition[number_of_threads] = number_of_rows;
//KRATOS_WATCH(elem_partition_size);
for (unsigned int i = 1; i < number_of_threads; i++)
elem_partition[i] = elem_partition[i - 1] + elem_partition_size;
//typedef Node < 3 > PointType;
//std::vector<ModelPart::NodesContainerType> aux;// aux;
//aux.resize(number_of_threads);
//ModelPart::NodesContainerType::iterator it_begin_particle_model_part = mr_linea_model_part.NodesBegin();
//ModelPart::NodesContainerType::iterator it_end_particle_model_part = mr_linea_model_part.NodesEnd();
ModelPart::ElementsContainerType::iterator ielembegin = mr_model_part.ElementsBegin();
#pragma omp parallel firstprivate(elem_partition) // firstprivate(results)//we will add the nodes in different parts of aux and later assemple everything toghether, remaming particles ids to get consecutive ids
{
unsigned int reused_particles=0;
unsigned int freeparticle = 0; //we start by the first position;
int k = OpenMPUtils::ThisThread();
//ModelPart::ElementsContainerType::iterator it_begin = mr_model_part.ElementsBegin() +  elem_partition[k];
//ModelPart::ElementsContainerType::iterator it_end = mr_model_part.ElementsBegin() + elem_partition[k+1] ;
BoundedMatrix<double, (3+2*TDim), 3 > pos; //7 particles (2D) or 9 particles (3D)
BoundedMatrix<double, (3+2*TDim), (TDim+1) > N;
double mesh_scalar1;
array_1d<int, (3+2*TDim) > positions;
unsigned int number_of_reseeded_particles;
//unsigned int number_of_water_reseeded_particles;
//array_1d<double, 3 > nodes_distances;
for(unsigned int ii=elem_partition[k]; ii<elem_partition[k+1]; ii++)
{
//const int & elem_id = ielem->Id();
ModelPart::ElementsContainerType::iterator ielem = ielembegin+ii;
//int & number_of_particles_in_elem= ielem->GetValue(NUMBER_OF_BED_PARTICLES);
//ParticlePointerVector&  element_particle_pointers =  (ielem->GetValue(BED_PARTICLE_POINTERS));
int & number_of_particles_in_elem= mnumber_of_particles_in_elems[ii];
ParticlePointerVector& element_particle_pointers =  mvector_of_particle_pointers_vectors[ii];
Geometry< Node<3> >& geom = ielem->GetGeometry();
if ( (number_of_particles_in_elem<(minimum_number_of_particles)))// && (geom[0].Y()<0.10) ) || (number_of_water_particles_in_elem>2 &&  number_of_particles_in_elem<(minimum_number_of_particles) ) )
{
//bool reseed_more=false;
number_of_reseeded_particles=0;
//reseed_more=true;
number_of_reseeded_particles= 3+2*TDim;
ComputeGaussPointPositionsForPostReseed(geom, pos, N);
for (unsigned int j = 0; j < number_of_reseeded_particles; j++)
{
//now we have to find an empty space ( a particle that was about to be deleted) in the particles model part. once found. there will be our renewed particle:
//the erase flag is re-checked inside "omp critical" so two threads cannot claim the same slot.
//NOTE(review): "freeparticle" is not bounds-checked against the vector size - assumes enough erased slots exist. TODO confirm.
bool keep_looking = true;
while(keep_looking)
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
#pragma omp critical
{
if (mparticles_vector[freeparticle].GetEraseFlag()==true)
{
mparticles_vector[freeparticle].GetEraseFlag()=false;
keep_looking=false;
}
}
if (keep_looking==false)
break;
else
freeparticle++;
}
else
{
freeparticle++;
}
}
Convection_Particle pparticle(pos(j,0),pos(j,1),pos(j,2));
array_1d<double,TDim+1>aux_N;
bool is_found = CalculatePosition(geom,pos(j,0),pos(j,1),pos(j,2),aux_N);
if (is_found==false)
{
KRATOS_WATCH(aux_N);
KRATOS_WATCH(j)
KRATOS_WATCH(ielem->Id())
}
//interpolate the mesh scalar at the seeding position using the precomputed shape functions of row j.
mesh_scalar1 = 0.0;
for (unsigned int l = 0; l < (TDim+1); l++)
{
mesh_scalar1 +=  N(j,l) * geom[l].FastGetSolutionStepValue(mUnknownVar);
}
pparticle.GetScalar1()=mesh_scalar1;
pparticle.GetEraseFlag()=false;
mparticles_vector[freeparticle]=pparticle;
element_particle_pointers(offset+number_of_particles_in_elem) = &mparticles_vector[freeparticle];
number_of_particles_in_elem++;
//NOTE(review): the while loop above can only exit with keep_looking==false,
//so this branch looks unreachable - presumably kept as a safety guard. Confirm.
if (keep_looking)
{
KRATOS_THROW_ERROR(std::logic_error, "FINISHED THE LIST AND COULDNT FIND A FREE CELL FOR THE NEW PARTICLE!", "");
}
else
{
reused_particles++;
}
}
}
}
}
KRATOS_CATCH("")
}
///Mirrors the active particles into "lagrangian_model_part" nodes so they
///can be printed / post-processed. Only one of every "input_filter_factor"
///particles is mirrored. On the first call the (required-to-be-empty) model
///part is filled with enough printing nodes; on every call the unused nodes
///are parked at a fixed inactive position with a zero scalar.
///(The method name keeps the historical "Priting" typo: external callers
///depend on it.)
void ExecuteParticlesPritingTool( ModelPart& lagrangian_model_part, int input_filter_factor )
{
    KRATOS_TRY

    //mfilter_factor; //we will only print one out of every "filter_factor" particles of the total particle list
    if(mparticle_printing_tool_initialized==false)
    {
        mfilter_factor=input_filter_factor;

        // BUGFIX: the original emptiness guard was
        // (NodesBegin()-NodesEnd()>0), which is never true for a non-empty
        // container (begin precedes end), so the check never fired.
        if(lagrangian_model_part.Nodes().size()>0)
            KRATOS_THROW_ERROR(std::logic_error, "AN EMPTY MODEL PART IS REQUIRED FOR THE PRINTING OF PARTICLES", "");

        lagrangian_model_part.AddNodalSolutionStepVariable(DISPLACEMENT);
        lagrangian_model_part.AddNodalSolutionStepVariable(mUnknownVar);

        // Pre-create every printing node we could possibly need.
        for (unsigned int i=0; i!=((mmaximum_number_of_particles*mnelems)/mfilter_factor)+mfilter_factor; i++)
        {
            Node < 3 > ::Pointer pnode = lagrangian_model_part.CreateNewNode( i+mlast_node_id+1 , 0.0, 0.0, 0.0); //remember this is the NEW model part!
            //pnode->SetBufferSize(mr_model_part.NodesBegin()->GetBufferSize());
            pnode->SetBufferSize(1);
        }
        mparticle_printing_tool_initialized=true;
    }

    //resetting data of the unused particles: park them far away with a zero scalar.
    const double inactive_particle_position= -10.0;
    array_1d<double,3>inactive_particle_position_vector;
    inactive_particle_position_vector(0)=inactive_particle_position;
    inactive_particle_position_vector(1)=inactive_particle_position;
    inactive_particle_position_vector(2)=inactive_particle_position;
    ModelPart::NodesContainerType::iterator inodebegin = lagrangian_model_part.NodesBegin();
    for(unsigned int ii=0; ii<lagrangian_model_part.Nodes().size(); ii++)
    {
        ModelPart::NodesContainerType::iterator inode = inodebegin+ii;
        inode->FastGetSolutionStepValue(mUnknownVar) = 0.0;
        inode->FastGetSolutionStepValue(DISPLACEMENT) = inactive_particle_position_vector;
    }

    //copy every mfilter_factor-th active particle into the next printing node.
    int counter=0;
    for (int i=0; i!=mmaximum_number_of_particles*mnelems; i++)
    {
        Convection_Particle& pparticle =mparticles_vector[i];
        if(pparticle.GetEraseFlag()==false && i%mfilter_factor==0)
        {
            ModelPart::NodesContainerType::iterator inode = inodebegin+counter; //copying info from the particle to the (printing) node.
            inode->FastGetSolutionStepValue(mUnknownVar) = pparticle.GetScalar1();
            inode->FastGetSolutionStepValue(DISPLACEMENT) = pparticle.Coordinates();
            counter++;
        }
    }
    KRATOS_CATCH("")
}
protected:
private:
///Moves one particle forwards along the nodal velocity field (mVelocityVar)
///for a total time of DELTA_TIME, split into substeps sized from the
///element's MEAN_VEL_OVER_ELEM_SIZE. "elements_in_trajectory" caches the
///elements crossed so that later particles starting from the same element
///can locate themselves cheaply. If at any substep the particle cannot be
///located in the mesh (it left the domain), integration stops and the
///particle is flagged for erasure.
void MoveParticle( Convection_Particle & pparticle,
Element::Pointer & pelement,
GlobalPointersVector< Element >& elements_in_trajectory,
unsigned int & number_of_elements_in_trajectory,
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
unsigned int nsubsteps;
double substep_dt;
bool KEEP_INTEGRATING=false;
bool is_found;
//bool have_air_node;
//bool have_water_node;
array_1d<double,3> vel;
array_1d<double,3> vel_without_other_phase_nodes=ZeroVector(3);
array_1d<double,3> position;
array_1d<double,3> mid_position;
array_1d<double,TDim+1> N;
//we start with the first position, then it will enter the loop.
position = pparticle.Coordinates(); //initial coordinates
double only_integral = 0.0 ;
is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
KEEP_INTEGRATING=true;
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
//interpolate the velocity at the particle position with the shape functions N.
vel=ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
}
//calculating substep to get +- courant(substep) = 0.1
nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
if (nsubsteps<1)
nsubsteps=1;
substep_dt = delta_t / double(nsubsteps);
only_integral = 1.0;// weight;//*double(nsubsteps);
position += vel*substep_dt;//weight;
//DONE THE FIRST LOCATION OF THE PARTICLE, NOW WE PROCEED TO STREAMLINE INTEGRATION USING THE MESH SEDIMENT_VELOCITY
//////////////////////////////////////////////////////////////////////////////////////////////////////
unsigned int check_from_element_number=0;
for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
{
if (KEEP_INTEGRATING==true)
{
//trajectory-aware overload: tries the cached trajectory elements before neighbours/bins.
is_found = FindNodeOnMesh(position, N ,pelement,elements_in_trajectory,number_of_elements_in_trajectory,check_from_element_number,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
vel = ZeroVector(3);
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
}
only_integral += 1.0; //values saved for the current time step
position+=vel*substep_dt;//weight;
}
else
{
KEEP_INTEGRATING=false;
break;
}
}
else
break;
}
}
//if integration failed at any substep, or the final position cannot be
//located (note: when KEEP_INTEGRATING is false, is_found already holds the
//last failed search result), mark the particle for deletion.
if (KEEP_INTEGRATING==false) (pparticle.GetEraseFlag()=true);
else is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //we must save the pointer of the last element that we're in (inside the pointervector pelement)
if (is_found==false) ( pparticle.GetEraseFlag()=true);
pparticle.Coordinates() = position;
}
///Interpolates the nodal DELTA_SCALAR1 field at the particle location and
///adds it to the particle scalar. The particle is NOT moved. The geometry is
///passed by reference so repeated calls on one element stay cheap.
void CorrectParticleUsingDeltaVariables(
    Convection_Particle & pparticle,
    Element::Pointer & pelement,
    Geometry< Node<3> >& geom)
{
    array_1d<double,TDim+1> N;
    //starting point: where the particle currently sits.
    array_1d<double,3> particle_coords = pparticle.Coordinates();
    float & scalar_on_particle = pparticle.GetScalar1();
    //double distance=0.0;
    double interpolated_delta = 0.0;

    bool is_found = CalculatePosition(geom, particle_coords[0], particle_coords[1], particle_coords[2], N);
    if (!is_found)
    {
        KRATOS_WATCH(N)
        //slightly-outside particle: clamp negative shape functions to a tiny
        //positive value and interpolate anyway.
        for (int j = 0; j != (TDim+1); j++)
        {
            if (N[j] < 0.0)
                N[j] = 1e-10;
        }
    }

    //interpolate the nodal correction at the particle position.
    for (unsigned int node = 0; node < (TDim+1); node++)
        interpolated_delta += geom[node].FastGetSolutionStepValue(DELTA_SCALAR1) * N[node];

    scalar_on_particle = scalar_on_particle + interpolated_delta;
}
///Moves a particle BACKWARDS along the nodal velocity field (streamline
///integration in substeps over DELTA_TIME) and assigns to it the scalar
///(mUnknownVar) interpolated at the last successfully-located upstream
///position. Used by PreReseed so freshly-seeded particles carry a
///consistent scalar value.
void MoveParticle_inverse_way(
Convection_Particle & pparticle,
//NOTE(review): despite the comment below, pelement IS taken by reference;
//callers (e.g. PreReseed) pass a local copy so their stored element is not
//overwritten. Confirm before changing the signature.
Element::Pointer & pelement, //NOT A REFERENCE!! WE SHALL NOT OVERWRITE THE ELEMENT IT BELONGS TO!
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
unsigned int nsubsteps;
double substep_dt;
bool KEEP_INTEGRATING=false;
bool is_found;
array_1d<double,3> vel;
array_1d<double,3> position;
array_1d<double,3> mid_position;
array_1d<double,TDim+1> N;
double scalar1 = 0.0;
//we start with the first position, then it will enter the loop.
position = pparticle.Coordinates(); //   + (pparticle)->FastGetSolutionStepValue(DISPLACEMENT); //initial coordinates
double only_integral = 0.0 ;
is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
KEEP_INTEGRATING=true;
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
//interpolate both the velocity and the scalar at the current position.
vel=ZeroVector(3);
scalar1=0.0;
for(unsigned int j=0; j<(TDim+1); j++)
{
scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j);
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j];
}
//calculating substep to get +- courant(substep) = 1/4
nsubsteps = 10.0 * (delta_t * pelement->GetValue(MEAN_VEL_OVER_ELEM_SIZE));
if (nsubsteps<1)
nsubsteps=1;
substep_dt = delta_t / double(nsubsteps);
only_integral = 1.0;// weight;//*double(nsubsteps);
//note the minus sign: we integrate AGAINST the velocity field.
position -= vel*substep_dt;//weight;
for(unsigned int i=0; i<(nsubsteps-1); i++)// this is for the substeps n+1. in the first one we already knew the position of the particle.
{ if (KEEP_INTEGRATING==true) {
is_found = FindNodeOnMesh(position, N ,pelement,result_begin,MaxNumberOfResults); //good, now we know where this point is:
if(is_found == true)
{
Geometry< Node<3> >& geom = pelement->GetGeometry();//the element we're in
vel=ZeroVector(3);
scalar1=0.0;
for(unsigned int j=0; j<(TDim+1); j++)
{
noalias(vel) += geom[j].FastGetSolutionStepValue(mVelocityVar)*N[j] ;
scalar1 += geom[j].FastGetSolutionStepValue(mUnknownVar)*N(j);
}
only_integral += 1.0;//weight ; //values saved for the current time step
position-=vel*substep_dt;//weight;
}
//if a substep leaves the domain we simply stop; scalar1 keeps the value
//sampled at the last valid position.
else KEEP_INTEGRATING=false;
}
}
pparticle.GetScalar1()=scalar1;
}
//else {KRATOS_WATCH(position); }
}
///this function should find the element into which a given node is located
///and return a pointer to the element and the vector containing the
///shape functions that define the position within the element.
///Search cascade (cheapest first): (1) the element the particle was last
///seen in, (2) its direct neighbours, (3) the spatial bins structure.
///Returns false if the point lies in none of the candidate elements.
bool FindNodeOnMesh( array_1d<double,3>& position,
                     array_1d<double,TDim+1>& N,
                     Element::Pointer & pelement,
                     ResultIteratorType result_begin,
                     const unsigned int MaxNumberOfResults)
{
    typedef std::size_t SizeType;
    const array_1d<double,3>& coords = position;
    // (the unused scratch vector "aux_N" that only served a commented-out
    //  neighbour-priority heuristic has been removed)

    //before using the bin to search for possible elements we check first the last element in which the particle was.
    Geometry<Node<3> >& geom_default = pelement->GetGeometry();
    bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
    if(is_found_1 == true) //that was easy!
    {
        return true;
    }

    //to begin with we check the neighbour elements; it is a bit more expensive
    GlobalPointersVector< Element >& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
    for (unsigned int i=0;i!=(neighb_elems.size());i++)
    {
        Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
        bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
        if (is_found_2)
        {
            pelement=neighb_elems(i)->shared_from_this();
            return true;
        }
    }

    //if checking all the neighbour elements did not work, we have to use the bins:
    //ask the container for the list of candidate elements around the point.
    SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
    if(results_found>0)
    {
        //loop over the candidate elements and check if the particle falls within
        for(SizeType i = 0; i< results_found; i++)
        {
            Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
            //find local position
            bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
            if(is_found == true)
            {
                pelement=Element::Pointer((*(result_begin+i)));
                return true;
            }
        }
    }

    //if nothing worked, then: not found case
    return false;
}
// VERSION INCLUDING PREDEFINED ELEMENTS FOLLOWING A TRAJECTORY
///Like the plain FindNodeOnMesh, but additionally consults (and updates) a
///per-seed-element cache of up to 20 elements already crossed by previous
///particles ("elements_in_trajectory"). Search order: last known element,
///cached trajectory elements (from "check_from_element_number" on),
///neighbours, spatial bins. Newly discovered elements are appended to the
///cache; returns false when the point is in none of them.
bool FindNodeOnMesh( array_1d<double,3>& position,
array_1d<double,TDim+1>& N,
Element::Pointer & pelement,
GlobalPointersVector< Element >& elements_in_trajectory,
unsigned int & number_of_elements_in_trajectory,
unsigned int & check_from_element_number,
ResultIteratorType result_begin,
const unsigned int MaxNumberOfResults)
{
typedef std::size_t SizeType;
const array_1d<double,3>& coords = position;
array_1d<double,TDim+1> aux_N;
//before using the bin to search for possible elements we check first the last element in which the particle was.
Geometry<Node<3> >& geom_default = pelement->GetGeometry(); //(*(i))->GetGeometry();
bool is_found_1 = CalculatePosition(geom_default,coords[0],coords[1],coords[2],N);
if(is_found_1 == true)
{
return true; //that was easy!
}
//if it was not found in the first element, we can proceed to check in the following elements (in the trajectory defined by previous particles that started from the same element.
for (unsigned int i=(check_from_element_number);i!=number_of_elements_in_trajectory;i++)
{
Geometry<Node<3> >& geom = elements_in_trajectory[i].GetGeometry();
//note: test into aux_N so N keeps its old value unless this element matches.
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
if (is_found_2)
{
pelement=elements_in_trajectory(i)->shared_from_this();
N=aux_N;
check_from_element_number = i+1 ; //now i element matches pelement, so to avoid checking twice the same element we send the counter to the following element.
return true;
}
}
//now we check the neighbour elements:
auto& neighb_elems = pelement->GetValue(NEIGHBOUR_ELEMENTS);
//the first we check is the one that has negative shape function, because it means it went outside in this direction:
//commented, it is not faster than simply checking all the neighbours (branching)
/*
unsigned int checked_element=0;
for (unsigned int i=0;i!=(TDim+1);i++)
{
if (N[i]<0.0)
{
checked_element=i;
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],aux_N);
if (is_found_2)
{
pelement=Element::Pointer(((neighb_elems(i))));
N=aux_N;
return true;
}
break;
}
}
*/
//we check all the neighbour elements
for (unsigned int i=0;i!=(neighb_elems.size());i++)
{
Geometry<Node<3> >& geom = neighb_elems[i].GetGeometry();
bool is_found_2 = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if (is_found_2)
{
pelement=neighb_elems(i)->shared_from_this();
//cache the newly crossed element (cap of 20) so following particles find it directly.
if (number_of_elements_in_trajectory<20)
{
elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
number_of_elements_in_trajectory++;
check_from_element_number = number_of_elements_in_trajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
}
return true;
}
}
//if checking all the neighbour elements did not work, we have to use the bins
//ask to the container for the list of candidate elements
SizeType results_found = mpBinsObjectDynamic->SearchObjectsInCell(Point{coords}, result_begin, MaxNumberOfResults );
if(results_found>0)
{
//loop over the candidate elements and check if the particle falls within
for(SizeType i = 0; i< results_found; i++)
{
Geometry<Node<3> >& geom = (*(result_begin+i))->GetGeometry();
//find local position
bool is_found = CalculatePosition(geom,coords[0],coords[1],coords[2],N);
if(is_found == true)
{
pelement=Element::Pointer((*(result_begin+i)));
//also cache elements discovered through the bins (same 20-element cap).
if (number_of_elements_in_trajectory<20)
{
elements_in_trajectory(number_of_elements_in_trajectory)=pelement;
number_of_elements_in_trajectory++;
check_from_element_number = number_of_elements_in_trajectory; //we do it after doing the ++ to the counter, so we woudlnt enter the loop that searches in the elements_in_trajectory list. we are the particle that is adding elements to the list
}
return true;
}
}
}
//not found case
return false;
}
//***************************************
//***************************************
///Computes the 2D (triangular) shape functions of point (xc,yc) with
///respect to "geom" and returns true when the point lies inside the
///triangle (zc is ignored in 2D). Throws if the element area is zero.
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
        const double xc, const double yc, const double zc,
        array_1d<double, 3 > & N
        )
{
    const double x0 = geom[0].X();
    const double y0 = geom[0].Y();
    const double x1 = geom[1].X();
    const double y1 = geom[1].Y();
    const double x2 = geom[2].X();
    const double y2 = geom[2].Y();

    const double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    if (area == 0.0)
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    const double inv_area = 1.0 / area;

    //barycentric coordinate of each node = area of the opposite sub-triangle / total area.
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;

    //inside (or on the boundary) iff every shape function lies in [0,1].
    return N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 &&
           N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0;
}
////////////
//using the pre loaded nodal coordinates
///2D variant working on pre-loaded nodal coordinates (layout x,y,z per
///node) instead of the Geometry object - avoids repeated X()/Y() calls.
///Returns true when (xc,yc) lies inside the triangle; throws on zero area.
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
        const double xc, const double yc, const double zc,
        array_1d<double, 3 > & N
        )
{
    const double& x0 = nodes_positions[0];
    const double& y0 = nodes_positions[1];
    const double& x1 = nodes_positions[3];
    const double& y1 = nodes_positions[4];
    const double& x2 = nodes_positions[6];
    const double& y2 = nodes_positions[7];

    const double area = CalculateVol(x0, y0, x1, y1, x2, y2);
    if (area == 0.0)
        KRATOS_THROW_ERROR(std::logic_error, "element with zero area found", "");
    const double inv_area = 1.0 / area;

    //barycentric coordinate of each node = area of the opposite sub-triangle / total area.
    N[0] = CalculateVol(x1, y1, x2, y2, xc, yc) * inv_area;
    N[1] = CalculateVol(x2, y2, x0, y0, xc, yc) * inv_area;
    N[2] = CalculateVol(x0, y0, x1, y1, xc, yc) * inv_area;

    //inside (or on the boundary) iff every shape function lies in [0,1].
    return N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 &&
           N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0;
}
//***************************************
//***************************************
///Computes the tetrahedral shape functions of point (xc,yc,zc) with respect
///to "geom"; returns true when the point lies inside the tetrahedron.
///Throws if the element volume is (near) zero.
inline bool CalculatePosition(Geometry<Node < 3 > >&geom,
        const double xc, const double yc, const double zc,
        array_1d<double, 4 > & N
        )
{
    const double x0 = geom[0].X(); const double y0 = geom[0].Y(); const double z0 = geom[0].Z();
    const double x1 = geom[1].X(); const double y1 = geom[1].Y(); const double z1 = geom[1].Z();
    const double x2 = geom[2].X(); const double y2 = geom[2].Y(); const double z2 = geom[2].Z();
    const double x3 = geom[3].X(); const double y3 = geom[3].Y(); const double z3 = geom[3].Z();

    const double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
    if (vol < 0.000000000000000000000000000001)
        KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
    const double inv_vol = 1.0 / vol;

    //barycentric coordinate of each node = volume of the opposite sub-tetrahedron / total volume.
    N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
    N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
    N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;

    //inside (or on the boundary) iff every shape function lies in [0,1].
    return N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
           N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0;
}
///////////////////
//using the pre loaded nodal coordinates
inline bool CalculatePosition(const array_1d<double,3*(TDim+1)>& nodes_positions,
const double xc, const double yc, const double zc,
array_1d<double, 4 > & N
)
{
const double& x0 = nodes_positions[0];
const double& y0 = nodes_positions[1];
const double& z0 = nodes_positions[2];
const double& x1 = nodes_positions[3];
const double& y1 = nodes_positions[4];
const double& z1 = nodes_positions[5];
const double& x2 = nodes_positions[6];
const double& y2 = nodes_positions[7];
const double& z2 = nodes_positions[8];
const double& x3 = nodes_positions[9];
const double& y3 = nodes_positions[10];
const double& z3 = nodes_positions[11];
double vol = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3);
double inv_vol = 0.0;
if (vol < 0.000000000000000000000000000001)
{
KRATOS_THROW_ERROR(std::logic_error, "element with zero vol found", "");
} else
{
inv_vol = 1.0 / vol;
}
N[0] = CalculateVol(x1, y1, z1, x3, y3, z3, x2, y2, z2, xc, yc, zc) * inv_vol;
N[1] = CalculateVol(x0, y0, z0, x1, y1, z1, x2, y2, z2, xc, yc, zc) * inv_vol;
N[2] = CalculateVol(x3, y3, z3, x1, y1, z1, x0, y0, z0, xc, yc, zc) * inv_vol;
N[3] = CalculateVol(x3, y3, z3, x0, y0, z0, x2, y2, z2, xc, yc, zc) * inv_vol;
if (N[0] >= 0.0 && N[1] >= 0.0 && N[2] >= 0.0 && N[3] >= 0.0 &&
N[0] <= 1.0 && N[1] <= 1.0 && N[2] <= 1.0 && N[3] <= 1.0)
//if the xc yc zc is inside the tetrahedron return true
return true;
return false;
}
/// Signed area of the triangle (x0,y0)-(x1,y1)-(x2,y2): half the z-component
/// of the cross product of the two edge vectors leaving vertex 0. Positive
/// for counter-clockwise vertex ordering.
inline double CalculateVol(const double x0, const double y0,
const double x1, const double y1,
const double x2, const double y2
)
{
const double ux = x1 - x0;
const double uy = y1 - y0;
const double vx = x2 - x0;
const double vy = y2 - y0;
return 0.5 * (ux * vy - uy * vx);
}
//***************************************
//***************************************
/// Signed volume of the tetrahedron with vertices (x0..z3): one sixth of the
/// scalar triple product of the three edge vectors leaving vertex 0. The sign
/// depends on the vertex orientation.
inline double CalculateVol(const double x0, const double y0, const double z0,
const double x1, const double y1, const double z1,
const double x2, const double y2, const double z2,
const double x3, const double y3, const double z3
)
{
// Edge vectors from vertex 0 to the other three vertices.
const double ax = x1 - x0, ay = y1 - y0, az = z1 - z0;
const double bx = x2 - x0, by = y2 - y0, bz = z2 - z0;
const double cx = x3 - x0, cy = y3 - y0, cz = z3 - z0;
// Determinant of the edge matrix, expanded term by term.
const double det = ax * by * cz - ax * cy * bz + ay * bz * cx - ay * bx * cz + az * bx * cy - az * by * cx;
return det * (1.0 / 6.0);
}
/// Computes 4 sampling points inside a triangle: N (rows 0..3) holds the nodal
/// weights of each point, pos the corresponding physical coordinates.
/// NOTE(review): despite the variable names, the corner weights are 0.15/0.70,
/// not the exact 1/6 and 2/3 of the standard 4-point rule — the points are
/// pulled slightly towards the interior. Only rows 0..3 of the 7-row matrices
/// are written; rows 4..6 are left untouched.
void ComputeGaussPointPositions_4(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N)
{
double one_third = 1.0 / 3.0;
double one_sixt = 0.15; // intentionally 0.15, not the exact 1/6
double two_third = 0.7; // intentionally 0.7, not the exact 2/3
// Nodal weights of each sampling point (rows) w.r.t. the 3 nodes (cols).
N(0, 0) = one_sixt;
N(0, 1) = one_sixt;
N(0, 2) = two_third;
N(1, 0) = two_third;
N(1, 1) = one_sixt;
N(1, 2) = one_sixt;
N(2, 0) = one_sixt;
N(2, 1) = two_third;
N(2, 2) = one_sixt;
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
//first point, biased towards node 2
pos(0, 0) = one_sixt * geom[0].X() + one_sixt * geom[1].X() + two_third * geom[2].X();
pos(0, 1) = one_sixt * geom[0].Y() + one_sixt * geom[1].Y() + two_third * geom[2].Y();
pos(0, 2) = one_sixt * geom[0].Z() + one_sixt * geom[1].Z() + two_third * geom[2].Z();
//second point, biased towards node 0
pos(1, 0) = two_third * geom[0].X() + one_sixt * geom[1].X() + one_sixt * geom[2].X();
pos(1, 1) = two_third * geom[0].Y() + one_sixt * geom[1].Y() + one_sixt * geom[2].Y();
pos(1, 2) = two_third * geom[0].Z() + one_sixt * geom[1].Z() + one_sixt * geom[2].Z();
//third point, biased towards node 1
pos(2, 0) = one_sixt * geom[0].X() + two_third * geom[1].X() + one_sixt * geom[2].X();
pos(2, 1) = one_sixt * geom[0].Y() + two_third * geom[1].Y() + one_sixt * geom[2].Y();
pos(2, 2) = one_sixt * geom[0].Z() + two_third * geom[1].Z() + one_sixt * geom[2].Z();
//fourth point: element barycenter
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
}
/// 2D post-reseeding: computes 7 sampling points inside a triangle — three
/// near the corners (weights 0.12/0.76), the barycenter, and three near the
/// edge midpoints (weights 0.12/0.44/0.44). N holds the nodal weights, pos
/// the physical coordinates. All weight rows sum to 1.
/// NOTE(review): the variable names suggest 1/8 and 3/4 but the actual values
/// are 0.12 and 0.76.
void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 7, 3 > & pos,BoundedMatrix<double, 7, 3 > & N) //2d
{
double one_third = 1.0 / 3.0;
double one_eight = 0.12; // intentionally 0.12, not the exact 1/8
double three_quarters = 0.76; // intentionally 0.76, not the exact 3/4
// Rows 0..2: one point biased towards each node.
N(0, 0) = one_eight;
N(0, 1) = one_eight;
N(0, 2) = three_quarters;
N(1, 0) = three_quarters;
N(1, 1) = one_eight;
N(1, 2) = one_eight;
N(2, 0) = one_eight;
N(2, 1) = three_quarters;
N(2, 2) = one_eight;
// Row 3: barycenter.
N(3, 0) = one_third;
N(3, 1) = one_third;
N(3, 2) = one_third;
// Rows 4..6: one point near each edge midpoint (0.12 + 0.44 + 0.44 = 1).
N(4, 0) = one_eight;
N(4, 1) = 0.44;
N(4, 2) = 0.44;
N(5, 0) = 0.44;
N(5, 1) = one_eight;
N(5, 2) = 0.44;
N(6, 0) = 0.44;
N(6, 1) = 0.44;
N(6, 2) = one_eight;
//first
pos(0, 0) = one_eight * geom[0].X() + one_eight * geom[1].X() + three_quarters * geom[2].X();
pos(0, 1) = one_eight * geom[0].Y() + one_eight * geom[1].Y() + three_quarters * geom[2].Y();
pos(0, 2) = one_eight * geom[0].Z() + one_eight * geom[1].Z() + three_quarters * geom[2].Z();
//second
pos(1, 0) = three_quarters * geom[0].X() + one_eight * geom[1].X() + one_eight * geom[2].X();
pos(1, 1) = three_quarters * geom[0].Y() + one_eight * geom[1].Y() + one_eight * geom[2].Y();
pos(1, 2) = three_quarters * geom[0].Z() + one_eight * geom[1].Z() + one_eight * geom[2].Z();
//third
pos(2, 0) = one_eight * geom[0].X() + three_quarters * geom[1].X() + one_eight * geom[2].X();
pos(2, 1) = one_eight * geom[0].Y() + three_quarters * geom[1].Y() + one_eight * geom[2].Y();
pos(2, 2) = one_eight * geom[0].Z() + three_quarters * geom[1].Z() + one_eight * geom[2].Z();
//fourth
pos(3, 0) = one_third * geom[0].X() + one_third * geom[1].X() + one_third * geom[2].X();
pos(3, 1) = one_third * geom[0].Y() + one_third * geom[1].Y() + one_third * geom[2].Y();
pos(3, 2) = one_third * geom[0].Z() + one_third * geom[1].Z() + one_third * geom[2].Z();
//fifth
pos(4, 0) = one_eight * geom[0].X() + 0.44 * geom[1].X() + 0.44 * geom[2].X();
pos(4, 1) = one_eight * geom[0].Y() + 0.44 * geom[1].Y() + 0.44 * geom[2].Y();
pos(4, 2) = one_eight * geom[0].Z() + 0.44 * geom[1].Z() + 0.44 * geom[2].Z();
//sixth
pos(5, 0) = 0.44 * geom[0].X() + one_eight * geom[1].X() + 0.44 * geom[2].X();
pos(5, 1) = 0.44 * geom[0].Y() + one_eight * geom[1].Y() + 0.44 * geom[2].Y();
pos(5, 2) = 0.44 * geom[0].Z() + one_eight * geom[1].Z() + 0.44 * geom[2].Z();
//seventh
pos(6, 0) = 0.44 * geom[0].X() + 0.44 * geom[1].X() + one_eight * geom[2].X();
pos(6, 1) = 0.44 * geom[0].Y() + 0.44 * geom[1].Y() + one_eight * geom[2].Y();
pos(6, 2) = 0.44 * geom[0].Z() + 0.44 * geom[1].Z() + one_eight * geom[2].Z();
}
/// 3D post-reseeding: computes 9 sampling points inside a tetrahedron — four
/// biased towards each node (0.7/0.1/0.1/0.1), the barycenter (0.25 each),
/// and four biased towards each face (0.1/0.3/0.3/0.3). Every weight row
/// sums to 1. pos is accumulated from the nodal coordinates via N.
void ComputeGaussPointPositionsForPostReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 9, 3 > & pos,BoundedMatrix<double, 9, 4 > & N) //3D
{
double one_quarter = 0.25;
double small_fraction = 0.1; // weight of the "far" nodes
double big_fraction = 0.7; // weight of the dominant node
double mid_fraction = 0.3; // weight used for the face-biased points
// Rows 0..3: one point close to each of the four nodes.
N(0, 0) = big_fraction;
N(0, 1) = small_fraction;
N(0, 2) = small_fraction;
N(0, 3) = small_fraction;
N(1, 0) = small_fraction;
N(1, 1) = big_fraction;
N(1, 2) = small_fraction;
N(1, 3) = small_fraction;
N(2, 0) = small_fraction;
N(2, 1) = small_fraction;
N(2, 2) = big_fraction;
N(2, 3) = small_fraction;
N(3, 0) = small_fraction;
N(3, 1) = small_fraction;
N(3, 2) = small_fraction;
N(3, 3) = big_fraction;
// Row 4: barycenter.
N(4, 0) = one_quarter;
N(4, 1) = one_quarter;
N(4, 2) = one_quarter;
N(4, 3) = one_quarter;
// Rows 5..8: one point biased towards each face (small weight on the
// opposite node, 0.1 + 3 * 0.3 = 1).
N(5, 0) = small_fraction;
N(5, 1) = mid_fraction;
N(5, 2) = mid_fraction;
N(5, 3) = mid_fraction;
N(6, 0) = mid_fraction;
N(6, 1) = small_fraction;
N(6, 2) = mid_fraction;
N(6, 3) = mid_fraction;
N(7, 0) = mid_fraction;
N(7, 1) = mid_fraction;
N(7, 2) = small_fraction;
N(7, 3) = mid_fraction;
N(8, 0) = mid_fraction;
N(8, 1) = mid_fraction;
N(8, 2) = mid_fraction;
N(8, 3) = small_fraction;
// pos(j,:) = sum_i N(j,i) * node_i coordinates.
pos=ZeroMatrix(9,3);
for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
{
array_1d<double, 3 > & coordinates = geom[i].Coordinates();
for (unsigned int j=0; j!=9; j++) //going through the 9 particles
{
for (unsigned int k=0; k!=3; k++) //x,y,z
pos(j,k) += N(j,i) * coordinates[k];
}
}
}
/// 2D pre-reseeding: seeds 3 particles inside a triangle. Each particle is
/// biased towards one node (weight 0.5) and equidistant from the other two
/// (weight 0.25 each). N holds the weights, pos the physical coordinates.
void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 3, 3 > & pos,BoundedMatrix<double, 3, 3 > & N) //2D
{
const double dominant = 0.5; // weight of the node the particle leans towards
const double secondary = 0.25; // weight of each remaining node
// Row j of N: nodal weights of particle j.
N(0, 0) = dominant;
N(0, 1) = secondary;
N(0, 2) = secondary;
N(1, 0) = secondary;
N(1, 1) = dominant;
N(1, 2) = secondary;
N(2, 0) = secondary;
N(2, 1) = secondary;
N(2, 2) = dominant;
// Physical positions: weighted combinations of the nodal coordinates.
pos(0, 0) = dominant * geom[0].X() + secondary * geom[1].X() + secondary * geom[2].X();
pos(0, 1) = dominant * geom[0].Y() + secondary * geom[1].Y() + secondary * geom[2].Y();
pos(0, 2) = dominant * geom[0].Z() + secondary * geom[1].Z() + secondary * geom[2].Z();
pos(1, 0) = secondary * geom[0].X() + dominant * geom[1].X() + secondary * geom[2].X();
pos(1, 1) = secondary * geom[0].Y() + dominant * geom[1].Y() + secondary * geom[2].Y();
pos(1, 2) = secondary * geom[0].Z() + dominant * geom[1].Z() + secondary * geom[2].Z();
pos(2, 0) = secondary * geom[0].X() + secondary * geom[1].X() + dominant * geom[2].X();
pos(2, 1) = secondary * geom[0].Y() + secondary * geom[1].Y() + dominant * geom[2].Y();
pos(2, 2) = secondary * geom[0].Z() + secondary * geom[1].Z() + dominant * geom[2].Z();
}
/// 3D pre-reseeding: seeds 4 particles inside a tetrahedron. Each particle is
/// biased towards one node (weight 0.4) and equidistant from the other three
/// (weight 0.2 each). N holds the weights, pos the physical coordinates.
void ComputeGaussPointPositionsForPreReseed(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 4, 3 > & pos,BoundedMatrix<double, 4, 4 > & N) //3D
{
//creating 4 particles, each will be closer to a node and equidistant to the other nodes
N(0, 0) = 0.4;
N(0, 1) = 0.2;
N(0, 2) = 0.2;
N(0, 3) = 0.2;
N(1, 0) = 0.2;
N(1, 1) = 0.4;
N(1, 2) = 0.2;
N(1, 3) = 0.2;
N(2, 0) = 0.2;
N(2, 1) = 0.2;
N(2, 2) = 0.4;
N(2, 3) = 0.2;
N(3, 0) = 0.2;
N(3, 1) = 0.2;
N(3, 2) = 0.2;
N(3, 3) = 0.4;
// pos(j,:) = sum_i N(j,i) * node_i coordinates, accumulated node by node.
pos=ZeroMatrix(4,3);
for (unsigned int i=0; i!=4; i++) //going through the 4 nodes
{
array_1d<double, 3 > & coordinates = geom[i].Coordinates();
for (unsigned int j=0; j!=4; j++) //going through the 4 particles
{
for (unsigned int k=0; k!=3; k++) //x,y,z
pos(j,k) += N(j,i) * coordinates[k];
}
}
}
/// Fills pos/N with a regular triangular lattice of 45 points inside a
/// triangle: barycentric coordinates start at 0.05 and advance in steps of
/// 0.1, giving rows of 9, 8, ..., 1 points (9+8+...+1 = 45).
void ComputeGaussPointPositions_45(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 45, 3 > & pos,BoundedMatrix<double, 45, 3 > & N)
{
//std::cout << "NEW ELEMENT" << std::endl;
unsigned int counter=0;
for (unsigned int i=0; i!=9;i++)
{
for (unsigned int j=0; j!=(9-i);j++)
{
// First two barycentric coordinates on the lattice; the third is
// whatever remains so the row sums to 1.
N(counter,0)=0.05+double(i)*0.1;
N(counter,1)=0.05+double(j)*0.1;
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
//std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
counter++;
}
}
}
/// Initial 2D seeding: fills pos/N with a triangular lattice of 15 points
/// (rows of 5, 4, 3, 2, 1). Barycentric coordinates start at 0.05 and
/// advance in steps of 0.2; the third coordinate takes the remainder.
void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 15, 3 > & pos,BoundedMatrix<double, 15, 3 > & N) //2D
{
//std::cout << "NEW ELEMENT" << std::endl;
unsigned int counter=0;
for (unsigned int i=0; i!=5;i++)
{
for (unsigned int j=0; j!=(5-i);j++)
{
N(counter,0)=0.05+double(i)*0.2;
N(counter,1)=0.05+double(j)*0.2;
N(counter,2)=1.0 - ( N(counter,1)+ N(counter,0) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z();
//std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
counter++;
}
}
}
/// Initial 3D seeding: fills pos/N with a layered tetrahedral lattice of 20
/// points (triangular layers of 10, 6, 3 and 1 points). The first three
/// barycentric coordinates follow 0.27 * (0.175 + index); the fourth takes
/// the remainder so each row of N sums to 1.
void ComputeGaussPointPositions_initial(Geometry< Node < 3 > >& geom, BoundedMatrix<double, 20, 3 > & pos,BoundedMatrix<double, 20, 4 > & N) //3D
{
//std::cout << "NEW ELEMENT" << std::endl;
//double total;
double fraction_increment;
unsigned int counter=0;
for (unsigned int i=0; i!=4;i++) //going to build a particle "pyramid"(tetrahedra) by layers. the first layer will be made by a triangle of 4 base X 4 height. since it is a triangle, it means it will have 10 particles
{
//std::cout << "inside i" << i << std::endl;
for (unsigned int j=0; j!=(4-i);j++)
{
//std::cout << "inside j" << j << std::endl;
for (unsigned int k=0; k!=(4-i-j);k++)
{
//std::cout << "inside k" << k << std::endl;
N(counter,0)= 0.27 * ( 0.175 + double(i) ) ; //this is our "surface" in which we will build each layer, so we must construct a triangle using what's left of the shape functions total (a total of 1)
//total = 1.0 - N(counter,0);
fraction_increment = 0.27; //
N(counter,1)=fraction_increment * (0.175 + double(j));
N(counter,2)=fraction_increment * (0.175 + double(k));
N(counter,3)=1.0 - ( N(counter,0)+ N(counter,1) + N(counter,2) ) ;
pos(counter, 0) = N(counter,0) * geom[0].X() + N(counter,1) * geom[1].X() + N(counter,2) * geom[2].X() + N(counter,3) * geom[3].X();
pos(counter, 1) = N(counter,0) * geom[0].Y() + N(counter,1) * geom[1].Y() + N(counter,2) * geom[2].Y() + N(counter,3) * geom[3].Y();
pos(counter, 2) = N(counter,0) * geom[0].Z() + N(counter,1) * geom[1].Z() + N(counter,2) * geom[2].Z() + N(counter,3) * geom[3].Z();
//std::cout << N(counter,0) << " " << N(counter,1) << " " << N(counter,2) << " " << std::endl;
counter++;
}
}
}
}
/// Generic matrix inversion via boost::uBLAS LU factorization.
/// @tparam T a uBLAS-compatible square matrix type.
/// @param input matrix to invert (copied; left untouched).
/// @param inverse overwritten with input^-1 on success.
/// @return false when the LU factorization fails (singular matrix),
/// true otherwise.
template<class T>
bool InvertMatrix(const T& input, T& inverse)
{
typedef permutation_matrix<std::size_t> pmatrix;
// create a working copy of the input
T A(input);
// create a permutation matrix for the LU-factorization
pmatrix pm(A.size1());
// perform LU-factorization
int res = lu_factorize(A, pm);
// non-zero result means the factorization broke down (singular input)
if (res != 0)
return false;
// create identity matrix of "inverse"
inverse.assign(identity_matrix<double> (A.size1()));
// backsubstitute to get the inverse
lu_substitute(A, pm, inverse);
return true;
}
/// Analytic inverse of a 3x3 matrix via the adjugate/determinant formula.
/// Only meaningful when TDim+1 == 3 (i.e. 2D simulations).
/// @param A matrix to invert (left untouched).
/// @param result overwritten with A^-1 on success.
/// @return false when A is singular (zero determinant), true otherwise;
/// this mirrors the contract of the LU-based InvertMatrix above.
bool InvertMatrix3x3(const BoundedMatrix<double, TDim+1 , TDim+1 >& A, BoundedMatrix<double, TDim+1 , TDim+1 >& result)
{
// Determinant by cofactor expansion along the first row.
const double determinant = +A(0,0)*(A(1,1)*A(2,2)-A(2,1)*A(1,2))
-A(0,1)*(A(1,0)*A(2,2)-A(1,2)*A(2,0))
+A(0,2)*(A(1,0)*A(2,1)-A(1,1)*A(2,0));
// BUGFIX: previously a singular matrix divided by zero, filled 'result'
// with inf/nan and still returned true. Report failure instead.
if (determinant == 0.0)
return false;
const double invdet = 1.0/determinant;
// result = adj(A) / det(A); each entry is a signed 2x2 cofactor.
result(0,0) = (A(1,1)*A(2,2)-A(2,1)*A(1,2))*invdet;
result(1,0) = -(A(0,1)*A(2,2)-A(0,2)*A(2,1))*invdet;
result(2,0) = (A(0,1)*A(1,2)-A(0,2)*A(1,1))*invdet;
result(0,1) = -(A(1,0)*A(2,2)-A(1,2)*A(2,0))*invdet;
result(1,1) = (A(0,0)*A(2,2)-A(0,2)*A(2,0))*invdet;
result(2,1) = -(A(0,0)*A(1,2)-A(1,0)*A(0,2))*invdet;
result(0,2) = (A(1,0)*A(2,1)-A(2,0)*A(1,1))*invdet;
result(1,2) = -(A(0,0)*A(2,1)-A(2,0)*A(0,1))*invdet;
result(2,2) = (A(0,0)*A(1,1)-A(1,0)*A(0,1))*invdet;
return true;
}
/// Validates that the model part carries everything this utility needs:
/// CONVECTION_DIFFUSION_SETTINGS with unknown, projection and velocity
/// variables defined and present in the nodal database, plus the MEAN_SIZE
/// and DELTA_SCALAR1 variables. Throws (via KRATOS_THROW_ERROR) on the
/// first missing requirement; returns 0 when everything is in place.
virtual int Check()
{
KRATOS_TRY
ProcessInfo& rCurrentProcessInfo = mr_model_part.GetProcessInfo();
if (rCurrentProcessInfo.Has(CONVECTION_DIFFUSION_SETTINGS)==false)
KRATOS_THROW_ERROR(std::logic_error, "no CONVECTION_DIFFUSION_SETTINGS in model_part", "");
//std::cout << "ConvDiff::Check(). If crashes, check CONVECTION_DIFFUSION_SETTINGS is defined" << std::endl;
ConvectionDiffusionSettings::Pointer my_settings = rCurrentProcessInfo.GetValue(CONVECTION_DIFFUSION_SETTINGS);
//UNKNOWN VARIABLE: must be defined and stored on the nodes
if(my_settings->IsDefinedUnknownVariable()==true)
{
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetUnknownVariable()) == false)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable defined but not contained in the model part", "");
}
else
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Unknown Variable not defined!", "");
//PROJECTION VARIABLE
//used as intermediate variable, is the variable at time n+1 but only accounting for the convective term.
if(my_settings->IsDefinedProjectionVariable()==true)
{
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetProjectionVariable()) == false)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Projection Variable defined but not contained in the model part", "");
}
else
KRATOS_THROW_ERROR(std::logic_error, "No Projection variable assigned for ConvDiff!", "");
//CONVECTION VELOCITY VARIABLE
//CURRENTLY WE ARE USING (VELOCITY -MESH_VELOCITY) TO CONVECT, so the ConvectionVariable must not be used:
//if(my_settings->IsDefinedConvectionVariable()==true)
//{
// if (BaseType::GetModelPart().NodesBegin()->SolutionStepsDataHas(my_settings->GetConvectionVariable()) == false)
// KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Convection Variable defined but not contained in the model part", "");
//}
//else
// std::cout << "No Projection variable assigned for ConvDiff. Assuming Convection=0" << std::endl;
// Defining a ConvectionVariable is an error here: convection is taken
// from the velocity variable instead (see comment above).
if(my_settings->IsDefinedConvectionVariable()==true)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: ConvectionVariable not used. Use VelocityVariable instead", "");
//VELOCITY VARIABLE
if(my_settings->IsDefinedVelocityVariable()==true)
{
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(my_settings->GetVelocityVariable()) == false)
KRATOS_THROW_ERROR(std::logic_error, "ConvDiffSettings: Velocity Variable defined but not contained in the model part", "");
}
else
KRATOS_THROW_ERROR(std::logic_error, "No Velocity variable assigned for ConvDiff!", "");
// Additional nodal variables required by this utility itself.
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(MEAN_SIZE) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add MEAN_SIZE variable to model part!", "");
if (mr_model_part.NodesBegin()->SolutionStepsDataHas(DELTA_SCALAR1) == false)
KRATOS_THROW_ERROR(std::logic_error, "Add DELTA_SCALAR1 variable to model part!", "");
return 0;
KRATOS_CATCH("")
}
// ---- data members -------------------------------------------------------
ModelPart& mr_model_part; // the model part this utility operates on
int m_nparticles; // NOTE(review): particle count per element? confirm against constructor (not visible here)
int mnelems; // number of elements
int moffset;
//vector<double> mareas_vector; UNUSED SO COMMENTED
int max_nsubsteps; // cap on convection substeps
double max_substep_dt; // cap on the substep time increment
int mmaximum_number_of_particles;
std::vector< Convection_Particle > mparticles_vector; //Point<3>
int mlast_elem_id;
bool modd_timestep;
bool mparticle_printing_tool_initialized;
unsigned int mfilter_factor;
unsigned int mlast_node_id;
//ModelPart& mr_particle_model_part;
// per-element particle bookkeeping
vector<int> mnumber_of_particles_in_elems;
vector<int> mnumber_of_particles_in_elems_aux;
//vector<ParticlePointerVector*> mpointers_to_particle_pointers_vectors; //pointing to the GetValue of each element
vector<ParticlePointerVector> mvector_of_particle_pointers_vectors;
typename BinsObjectDynamic<Configure>::Pointer mpBinsObjectDynamic; // spatial search structure
// variables (resolved from CONVECTION_DIFFUSION_SETTINGS, see Check())
const Variable<double>& mUnknownVar;
const Variable<double>& mProjectionVar;
const Variable<array_1d<double,3> >& mVelocityVar;
const Variable<array_1d<double,3> >& mMeshVelocityVar;
};
} // namespace Kratos.
#endif // KRATOS_MOVE_PARTICLE_UTILITY_FLUID_PFEM2_TRANSPORT_INCLUDED defined
|
hd_joint_probability_generator_inl.h | /*
*
* Copyright (c) 2014, Nicola Pezzotti (Delft University of Technology)
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* This product includes software developed by the Delft University of Technology.
* 4. Neither the name of the Delft University of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY NICOLA PEZZOTTI ''AS IS'' AND ANY EXPRESS
* OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL NICOLA PEZZOTTI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
* OF SUCH DAMAGE.
*
*/
#ifndef HD_JOINT_PROBABILITY_GENERATOR_INL
#define HD_JOINT_PROBABILITY_GENERATOR_INL
#include "hdi/dimensionality_reduction/hd_joint_probability_generator.h"
#include "hdi/utils/math_utils.h"
#include "hdi/utils/log_helper_functions.h"
#include "hdi/utils/scoped_timers.h"
#include <random>
#include <chrono>
#include <unordered_set>
#include <numeric>
#ifdef __USE_GCD__
#include <dispatch/dispatch.h>
#endif
#pragma warning( push )
#pragma warning( disable : 4267)
#pragma warning( push )
#pragma warning( disable : 4291)
#pragma warning( push )
#pragma warning( disable : 4996)
#pragma warning( push )
#pragma warning( disable : 4018)
#pragma warning( push )
#pragma warning( disable : 4244)
#include "flann/flann.h"
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
#pragma warning( pop )
namespace hdi{
namespace dr{
/////////////////////////////////////////////////////////////////////////
// Default parameters: perplexity 30 with a x3 neighborhood multiplier
// (so 30*3+1 = 91 nearest neighbors are retrieved, the point itself
// included), 4 randomized KD-trees and 1024 checks for the approximate
// nearest-neighbor search.
template <typename scalar, typename sparse_scalar_matrix>
HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::Parameters::Parameters():
_perplexity(30),
_perplexity_multiplier(3),
_num_trees(4),
_num_checks(1024)
{}
/////////////////////////////////////////////////////////////////////////
// All timing counters start at zero; they are filled by the ScopedTimers
// in the compute* methods.
template <typename scalar, typename sparse_scalar_matrix>
HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::Statistics::Statistics():
_total_time(0),
_trees_construction_time(0),
_aknn_time(0),
_distribution_time(0)
{}
// Clears every collected timing so the generator can be profiled afresh.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::Statistics::reset(){
_distribution_time = _aknn_time = _trees_construction_time = _total_time = 0;
}
// Pretty-prints the collected timings through the provided logger
// (utils::secureLog* tolerate the logger being absent).
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::Statistics::log(utils::AbstractLog* logger)const{
utils::secureLog(logger,"\n-------- HD Joint Probability Generator Statistics -----------");
utils::secureLogValue(logger,"Total time",_total_time);
utils::secureLogValue(logger,"\tTrees construction time",_trees_construction_time,true,1);
utils::secureLogValue(logger,"\tAKNN time",_aknn_time,true,3);
utils::secureLogValue(logger,"\tDistributions time",_distribution_time,true,2);
utils::secureLog(logger,"--------------------------------------------------------------\n");
}
/////////////////////////////////////////////////////////////////////////
// Constructs a generator with no logger attached; attach one before calling
// the compute* methods if log output is desired.
template <typename scalar, typename sparse_scalar_matrix>
HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::HDJointProbabilityGenerator():
_logger(nullptr)
{
}
// Computes the symmetric joint probability distribution P used by t-SNE-style
// embeddings: approximate nearest neighbors -> per-point Gaussian
// (conditional) distributions -> symmetrization.
// @param high_dimensional_data row-major data, num_dps points x num_dim dims.
// @param distribution output sparse matrix, resized to num_dps rows.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeJointProbabilityDistribution(scalar_type* high_dimensional_data, unsigned int num_dim, unsigned int num_dps, sparse_scalar_matrix& distribution, Parameters params){
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time);
hdi::utils::secureLog(_logger,"Computing the HD joint probability distribution...");
distribution.resize(num_dps);
std::vector<scalar_type> distances_squared;
std::vector<int> indices;
computeHighDimensionalDistances(high_dimensional_data, num_dim, num_dps, distances_squared, indices, params);
computeGaussianDistributions(distances_squared,indices,distribution,params);
symmetrize(distribution);
}
// Computes the per-point Gaussian (conditional) probability distributions
// into a sparse matrix. NOTE(review): unlike
// computeJointProbabilityDistribution above, no symmetrize() is applied, so
// despite the log message these are conditional, not joint, distributions.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeProbabilityDistributions(scalar_type* high_dimensional_data, unsigned int num_dim, unsigned int num_dps, sparse_scalar_matrix& distribution, Parameters params){
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time);
hdi::utils::secureLog(_logger,"Computing the HD joint probability distribution...");
distribution.resize(num_dps);
std::vector<scalar_type> distances_squared;
std::vector<int> indices;
computeHighDimensionalDistances(high_dimensional_data, num_dim, num_dps, distances_squared, indices, params);
computeGaussianDistributions(distances_squared,indices,distribution,params);
}
// Computes the per-point Gaussian (conditional) distributions in flat form:
// probabilities[j*nn + k] is the probability mass of point j on its k-th
// neighbor indices[j*nn + k], with nn = perplexity*perplexity_multiplier + 1.
// @param probabilities output; sized here to num_dps*nn if too small.
// @param indices output neighbor indices; sized by the KNN search.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeProbabilityDistributions(scalar_type* high_dimensional_data, unsigned int num_dim, unsigned int num_dps, std::vector<scalar_type>& probabilities, std::vector<int>& indices, Parameters params){
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._total_time);
hdi::utils::secureLog(_logger,"Computing the HD joint probability distribution...");
std::vector<scalar_type> distances_squared;
computeHighDimensionalDistances(high_dimensional_data, num_dim, num_dps, distances_squared, indices, params);
// BUGFIX: 'probabilities' was never sized before computeGaussianDistributions
// writes through iterators at offsets up to num_dps*nn — undefined behavior
// when the caller passes an empty (or too small) vector.
const unsigned int nn = params._perplexity*params._perplexity_multiplier + 1;
if(probabilities.size() < num_dps*nn)
probabilities.resize(num_dps*nn,0);
computeGaussianDistributions(distances_squared,indices,probabilities,params);
}
// Approximate k-nearest-neighbor search via FLANN randomized KD-trees.
// Fills distances_squared/indices as num_dps rows of nn entries each, where
// nn = perplexity*perplexity_multiplier + 1 (the +1 accounts for the query
// point itself, since dataset and query alias the same memory).
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeHighDimensionalDistances(scalar_type* high_dimensional_data, unsigned int num_dim, unsigned int num_dps, std::vector<scalar_type>& distances_squared, std::vector<int>& indices, Parameters& params){
hdi::utils::secureLog(_logger,"Computing nearest neighborhoods...");
// dataset and query wrap the same buffer: every point queries all points.
flann::Matrix<scalar_type> dataset (high_dimensional_data,num_dps,num_dim);
flann::Matrix<scalar_type> query (high_dimensional_data,num_dps,num_dim);
flann::Index<flann::L2<scalar_type> > index(dataset, flann::KDTreeIndexParams(params._num_trees));
const unsigned int nn = params._perplexity*params._perplexity_multiplier + 1;
distances_squared.resize(num_dps*nn);
indices.resize(num_dps*nn);
{
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._trees_construction_time);
index.buildIndex();
}
{
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._aknn_time);
// Result matrices are views over the output vectors (no extra copies).
flann::Matrix<int> indices_mat(indices.data(), query.rows, nn);
flann::Matrix<scalar_type> dists_mat(distances_squared.data(), query.rows, nn);
flann::SearchParams flann_params(params._num_checks);
flann_params.cores = 0; //all cores
index.knnSearch(query, indices_mat, dists_mat, nn, flann_params);
}
}
// Converts squared neighbor distances into per-point Gaussian distributions
// with a bandwidth chosen (by binary search, max 200 iterations, tolerance
// 1e-5) to match params._perplexity, then scatters the values into the
// sparse matrix. Parallelized with GCD on Apple platforms, OpenMP elsewhere.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeGaussianDistributions(const std::vector<scalar_type>& distances_squared, const std::vector<int>& indices, sparse_scalar_matrix& distribution, Parameters& params){
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._distribution_time);
utils::secureLog(_logger,"Computing joint-probability distribution...");
const int n = distribution.size();
const unsigned int nn = params._perplexity*params._perplexity_multiplier + 1;
#ifdef __USE_GCD__
__block scalar_vector_type temp_vector(distances_squared.size(),0);
#else
scalar_vector_type temp_vector(distances_squared.size(),0);
#endif //__USE_GCD__
#ifdef __USE_GCD__
std::cout << "GCD dispatch, hd_joint_probability_generator 193.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
// Each iteration works on the disjoint slice [j*nn, (j+1)*nn) of
// temp_vector, so the parallel writes do not race.
// The returned bandwidth (sigma) is not needed afterwards.
const auto sigma = utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>(
distances_squared.begin() + j*nn, //check squared
distances_squared.begin() + (j + 1)*nn,
temp_vector.begin() + j*nn,
temp_vector.begin() + (j + 1)*nn,
params._perplexity,
200,
1e-5,
0
);
}
#ifdef __USE_GCD__
);
#endif
// Scatter into the sparse rows; k starts at 1, skipping the first
// neighbor (presumably the point itself — dataset and query coincide).
for(int j = 0; j < n; ++j){
for(int k = 1; k < nn; ++k){
const unsigned int i = j*nn+k;
distribution[j][indices[i]] = temp_vector[i];
}
}
}
// Flat-array variant: converts squared neighbor distances into per-point
// Gaussian distributions written at probabilities[j*nn ... (j+1)*nn).
// nn = perplexity*perplexity_multiplier + 1; the point count n is recovered
// from indices.size()/nn.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeGaussianDistributions(const std::vector<scalar_type>& distances_squared, const std::vector<int>& indices, std::vector<scalar_type>& probabilities, Parameters& params){
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._distribution_time);
utils::secureLog(_logger,"Computing joint-probability distribution...");
const unsigned int nn = params._perplexity*params._perplexity_multiplier + 1;
const int n = indices.size()/nn;
// BUGFIX: writing through probabilities.begin()+j*nn requires the vector to
// already hold n*nn elements; size it here instead of relying on the caller.
if(probabilities.size() < indices.size())
probabilities.resize(indices.size(),0);
#ifdef __USE_GCD__
std::cout << "GCD dispatch, hd_joint_probability_generator 232.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
// Each iteration writes the disjoint slice [j*nn, (j+1)*nn), so the
// parallel writes do not race. The returned bandwidth is discarded.
const auto sigma = utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>(
distances_squared.begin() + j*nn, //check squared
distances_squared.begin() + (j + 1)*nn,
probabilities.begin() + j*nn,
probabilities.begin() + (j + 1)*nn,
params._perplexity,
200,
1e-5,
0
);
}
#ifdef __USE_GCD__
);
#endif
}
// Symmetrizes the sparse distribution in place: for every stored entry (j,i)
// sets P(j,i) = P(i,j) = (P(j,i) + P(i,j)) / 2.
// NOTE(review): rows distribution[i] (i != j) are modified while row j is
// being iterated — safe as long as rows are independent associative
// containers whose operator[] does not invalidate other rows' iterators;
// confirm against the sparse_scalar_matrix type actually used.
// Entries created in row i are revisited later, but re-averaging two equal
// values is a no-op, so the result is stable.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::symmetrize(sparse_scalar_matrix& distribution){
const int n = distribution.size();
for(int j = 0; j < n; ++j){
for(auto& e: distribution[j]){
const unsigned int i = e.first;
scalar new_val = (distribution[j][i]+distribution[i][j])*0.5;
distribution[j][i] = new_val;
distribution[i][j] = new_val;
}
}
}
// Computes per-point Gaussian distributions directly from a dense, row-major
// num_dps x num_dps matrix of squared distances (no KNN search). Every point
// uses all others as neighbors (nn == num_dps); row j of the result is dense.
// The last argument passed to the perplexity solver is j, presumably the
// index to exclude as "self" — confirm against
// computeGaussianDistributionWithFixedPerplexity.
template <typename scalar, typename sparse_scalar_matrix>
void HDJointProbabilityGenerator<scalar, sparse_scalar_matrix>::computeProbabilityDistributionsFromDistanceMatrix(const std::vector<scalar_type>& squared_distance_matrix, unsigned int num_dps, sparse_scalar_matrix& distribution, Parameters params){
utils::ScopedTimer<scalar_type, utils::Seconds> timer(_statistics._distribution_time);
utils::secureLog(_logger,"Computing joint-probability distribution...");
const int n = num_dps;
const unsigned int nn = num_dps;
#ifdef __USE_GCD__
__block scalar_vector_type temp_vector(num_dps*num_dps,0);
#else
scalar_vector_type temp_vector(num_dps*num_dps,0);
#endif //__USE_GCD__
distribution.clear();
distribution.resize(n);
#ifdef __USE_GCD__
std::cout << "GCD dispatch, hd_joint_probability_generator 193.\n";
dispatch_apply(n, dispatch_get_global_queue(0, 0), ^(size_t j) {
#else
#pragma omp parallel for
for(int j = 0; j < n; ++j){
#endif //__USE_GCD__
// Each iteration fills the disjoint slice [j*nn, (j+1)*nn) of
// temp_vector; the returned bandwidth is discarded.
const auto sigma = utils::computeGaussianDistributionWithFixedPerplexity<scalar_vector_type>(
squared_distance_matrix.begin() + j*nn, //check squared
squared_distance_matrix.begin() + (j + 1)*nn,
temp_vector.begin() + j*nn,
temp_vector.begin() + (j + 1)*nn,
params._perplexity,
200,
1e-5,
j
);
}
#ifdef __USE_GCD__
);
#endif
// Copy the dense rows into the sparse structure (k starts at 0 here:
// the full row, including the diagonal slot, is stored).
for(int j = 0; j < n; ++j){
for(int k = 0; k < nn; ++k){
const unsigned int i = j*nn+k;
distribution[j][k] = temp_vector[i];
}
}
}
///////////////////////////////////////////////////////////////////////////////////7
}
}
#endif
|
hotspot99.c | /**
* LICENSE TERMS
Copyright (c)2008-2010 University of Virginia
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted without royalty fees or other restrictions, provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the University of Virginia, the Dept. of Computer Science, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF VIRGINIA OR THE SOFTWARE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you use this software or a modified version of it, please cite the most relevant among the following papers:
1) S. Che, M. Boyer, J. Meng, D. Tarjan, J. W. Sheaffer, Sang-Ha Lee and K. Skadron.
"Rodinia: A Benchmark Suite for Heterogeneous Computing". IEEE International Symposium
on Workload Characterization, Oct 2009.
2) J. Meng and K. Skadron. "Performance Modeling and Automatic Ghost Zone Optimization
for Iterative Stencil Loops on GPUs." In Proceedings of the 23rd Annual ACM International
Conference on Supercomputing (ICS), June 2009.
3) L.G. Szafaryn, K. Skadron and J. Saucerman. "Experiences Accelerating MATLAB Systems
Biology Applications." in Workshop on Biomedicine in Computing (BiC) at the International
Symposium on Computer Architecture (ISCA), June 2009.
4) M. Boyer, D. Tarjan, S. T. Acton, and K. Skadron. "Accelerating Leukocyte Tracking using CUDA:
A Case Study in Leveraging Manycore Coprocessors." In Proceedings of the International Parallel
and Distributed Processing Symposium (IPDPS), May 2009.
5) S. Che, M. Boyer, J. Meng, D. Tarjan, J. W. Sheaffer, and K. Skadron. "A Performance
Study of General Purpose Applications on Graphics Processors using CUDA" Journal of
Parallel and Distributed Computing, Elsevier, June 2008.
6) S. Che, J. Li, J. W. Sheaffer, K. Skadron, and J. Lach. "Accelerating Compute
Intensive Applications with GPUs and FPGAs" In Proceedings of the IEEE Symposium
on Application Specific Processors (SASP), June 2008.
*
*/
/**
* This file was converted into C99 form by Mehdi Amini
* 05 june 2011
*/
#include <timing.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#include <sys/time.h>
/* maximum length of one input-file line read by read_input() */
#define STR_SIZE 256
/* maximum power density possible (say 300W for a 10mm x 10mm chip) */
#define MAX_PD (3.0e6)
/* required precision in degrees */
#define PRECISION 0.001
/* specific heat of silicon (presumably J/(m^3 K) -- confirm units) */
#define SPEC_HEAT_SI 1.75e6
/* thermal conductivity of silicon (presumably W/(m K) -- confirm units) */
#define K_SI 100
/* capacitance fitting factor */
#define FACTOR_CHIP 0.5
/* define OPEN to enable the OpenMP parallel loops in single_iteration() */
//#define OPEN
//#define NUM_THREAD 4
/* chip parameters */
double t_chip = 0.0005;
double chip_height = 0.016;
double chip_width = 0.016;
/* ambient temperature, assuming no package at all */
double amb_temp = 80.0;
/* number of OpenMP threads, set from argv[4] in main() */
int num_omp_threads;
/* Single iteration of the transient solver in the grid model.
 * Advances the solution of the discretized difference equations by one
 * time step: every cell's temperature delta combines dissipated power,
 * conduction to the horizontal (Rx) and vertical (Ry) neighbours, and
 * leakage to the ambient (Rz).  `result` is scratch space; `temp` holds
 * the updated temperatures on return.
 */
void single_iteration(int row,
                      int col,
                      double result[row][col],
                      double temp[row][col],
                      double power[row][col],
                      double Cap,
                      double Rx,
                      double Ry,
                      double Rz,
                      double step) {
  double delta;
  int r, c;
#ifdef PGI_ACC
#pragma acc region
  {
#endif
#ifdef OPEN
  omp_set_num_threads(num_omp_threads);
#pragma omp parallel for shared(power, temp,result) private(r, c, delta) firstprivate(row, col) schedule(static)
#endif
  for (r = 0; r < row; r++) {
    for (c = 0; c < col; c++) {
      /* Corner 1 */
      if((r == 0) && (c == 0)) {
        /* BUG FIX: the vertical neighbour of (0,0) is temp[1][0]; the
         * original indexed temp[0][col], which is out of bounds for row 0
         * and only aliased temp[1][0] through the contiguous layout. */
        delta = (step / Cap) * (power[0][0] + (temp[0][1] - temp[0][0]) / Rx
            + (temp[1][0] - temp[0][0]) / Ry + (amb_temp - temp[0][0]) / Rz);
      } /* Corner 2 */
      else if((r == 0) && (c == col - 1)) {
        delta = (step / Cap) * (power[0][c] + (temp[0][c - 1] - temp[0][c])
            / Rx + (temp[1][c] - temp[0][c]) / Ry + (amb_temp - temp[0][c])
            / Rz);
      } /* Corner 3 */
      else if((r == row - 1) && (c == col - 1)) {
        delta = (step / Cap) * (power[r][c] + (temp[r][c - 1] - temp[r][c])
            / Rx + (temp[r - 1][c] - temp[r][c]) / Ry + (amb_temp - temp[r][c])
            / Rz);
      } /* Corner 4 */
      else if((r == row - 1) && (c == 0)) {
        delta = (step / Cap)
            * (power[r][0] + (temp[r][1] - temp[r][0]) / Rx + (temp[r - 1][0]
            - temp[r][0]) / Ry + (amb_temp - temp[r][0]) / Rz);
      } /* Edge 1 */
      else if(r == 0) {
        delta = (step / Cap) * (power[0][c] + (temp[0][c + 1] + temp[0][c - 1]
            - 2.0 * temp[0][c]) / Rx + (temp[1][c] - temp[0][c]) / Ry
            + (amb_temp - temp[0][c]) / Rz);
      } /* Edge 2 */
      else if(c == col - 1) {
        delta = (step / Cap) * (power[r][c] + (temp[r + 1][c] + temp[r - 1][c]
            - 2.0 * temp[r][c]) / Ry + (temp[r][c - 1] - temp[r][c]) / Rx
            + (amb_temp - temp[r][c]) / Rz);
      } /* Edge 3 */
      else if(r == row - 1) {
        delta = (step / Cap) * (power[r][c] + (temp[r][c + 1] + temp[r][c - 1]
            - 2.0 * temp[r][c]) / Rx + (temp[r - 1][c] - temp[r][c])
            / Ry + (amb_temp - temp[r][c]) / Rz);
      } /* Edge 4 */
      else if(c == 0) {
        /* BUG FIX: the horizontal conduction term must use the column-1
         * neighbour temp[r][1]; the original reused the vertical neighbour
         * temp[r+1][0], diverging from the mirrored Edge 2 case and from
         * the reference Rodinia implementation. */
        delta = (step / Cap) * (power[r][0] + (temp[r+1][0] + temp[r-1][0]
            - 2.0 * temp[r][0]) / Ry + (temp[r][1]
            - temp[r][0]) / Rx + (amb_temp - temp[r][0]) / Rz);
      } /* Inside the chip */
      else {
        delta = (step / Cap) * (power[r][c] + (temp[r + 1][c]
            + temp[r-1][c] - 2.0 * temp[r][c]) / Ry + (temp[r][c + 1] + temp[r][c - 1] - 2.0 * temp[r][c])
            / Rx + (amb_temp - temp[r][c]) / Rz);
      }
      /* Update Temperatures */
      result[r][c] = temp[r][c] + delta;
    }
  }
#ifdef OPEN
  omp_set_num_threads(num_omp_threads);
#pragma omp parallel for shared(result, temp) private(r, c) schedule(static)
#endif
  /* commit the new state back into temp */
  for (r = 0; r < row; r++) {
    for (c = 0; c < col; c++) {
      temp[r][c] = result[r][c];
    }
  }
#ifdef PGI_ACC
  }
#endif
}
/* Transient solver driver routine: converts the heat transfer differential
 * equations to difference equations and solves them by iterating
 * single_iteration() num_iterations times.
 *
 * result      - scratch grid passed through to single_iteration
 * temp        - in: initial temperatures; out: final temperatures
 * power       - dissipated power per cell
 */
void compute_tran_temp(int row,
                       int col,
                       double result[row][col],
                       int num_iterations,
                       double temp[row][col],
                       double power[row][col]) {
  double grid_height = chip_height / row;
  double grid_width = chip_width / col;
  /* per-cell capacitance and resistances of the RC thermal model */
  double Cap = FACTOR_CHIP * SPEC_HEAT_SI * t_chip * grid_width * grid_height;
  double Rx = grid_width / (2.0 * K_SI * t_chip * grid_height);
  double Ry = grid_height / (2.0 * K_SI * t_chip * grid_width);
  double Rz = t_chip / (K_SI * grid_height * grid_width);
  /* step size bounded so the steepest possible slope stays within PRECISION */
  double max_slope = MAX_PD / (FACTOR_CHIP * t_chip * SPEC_HEAT_SI);
  double step = PRECISION / max_slope;
#ifdef VERBOSE
  fprintf(stdout, "total iterations: %d s\tstep size: %g s\n", num_iterations, step);
  fprintf(stdout, "Rx: %g\tRy: %g\tRz: %g\tCap: %g\n", Rx, Ry, Rz, Cap);
#endif
  for (int i = 0; i < num_iterations; i++) {
#ifdef VERBOSE
    /* BUG FIX: the original printed `i++`, which also advanced the loop
     * counter and silently halved the number of iterations whenever
     * VERBOSE was defined.  (An unused shadowing `int i` and `double t`
     * were removed as well.) */
    fprintf(stdout, "iteration %d\n", i);
#endif
    single_iteration(row, col, result, temp, power, Cap, Rx, Ry, Rz, step);
  }
#ifdef VERBOSE
  fprintf(stdout, "total iterations run: %d\n", num_iterations);
#endif
}
/* Report a fatal error on stderr and terminate the program. */
void fatal(char *s) {
  fputs("error: ", stderr);
  fputs(s, stderr);
  fputc('\n', stderr);
  exit(1);
}
/* Read grid_rows * grid_cols doubles from `file`, one value per line,
 * into `vect` in row-major order.  Calls fatal() (which exits) on open
 * failure, short files, or malformed lines. */
void read_input(int grid_rows,
                int grid_cols,
                double vect[grid_rows][grid_cols],
                char *file) {
  int i, j;
  FILE *fp;
  char str[STR_SIZE];
  double val;
  fp = fopen(file, "r");
  if(!fp)
    fatal("file could not be opened for reading");
  for (i = 0; i < grid_rows; i++) {
    for (j = 0; j < grid_cols; j++) {
      /* FIX: check fgets' return value directly.  The original tested
       * feof() afterwards, so a read *error* (not EOF) would leave stale
       * data in str and parse it silently.  Unused locals `index` and
       * the captured fgets result were removed. */
      if(fgets(str, STR_SIZE, fp) == NULL)
        fatal("not enough lines in file");
      if(sscanf(str, "%lf", &val) != 1)
        fatal("invalid file format");
      vect[i][j] = val;
    }
  }
  fclose(fp);
}
/* Print command-line usage to stderr and exit with failure status. */
void usage(int argc, char **argv) {
  static const char *details[] = {
    "\t<grid_rows> - number of rows in the grid (positive integer)\n",
    "\t<grid_cols> - number of columns in the grid (positive integer)\n",
    "\t<sim_time> - number of iterations\n",
    "\t<no. of threads> - number of threads\n",
    "\t<temp_file> - name of the file containing the initial temperature values of each cell\n",
    "\t<power_file> - name of the file containing the dissipated power values of each cell\n",
  };
  size_t k;
  fprintf(stderr,
          "Usage: %s <grid_rows> <grid_cols> <sim_time> <no. of threads><temp_file> <power_file>\n",
          argv[0]);
  for (k = 0; k < sizeof(details) / sizeof(details[0]); k++)
    fputs(details[k], stderr);
  exit(1);
}
/* Entry point: parse arguments, read the initial temperature and power
 * grids, run the transient thermal simulation, and report timing. */
int main(int argc, char **argv) {
  int grid_rows, grid_cols, sim_time, i, j;
  char *tfile, *pfile;
  /* check validity of inputs */
  if(argc != 7)
    usage(argc, argv);
  if((grid_rows = atoi(argv[1])) <= 0 || (grid_cols = atoi(argv[2])) <= 0
      || (sim_time = atoi(argv[3])) <= 0 || (num_omp_threads = atoi(argv[4]))
      <= 0)
    usage(argc, argv);
  /* allocate the temperature and power grids (C99 VLAs) */
  double temp[grid_rows][grid_cols];
  double power[grid_rows][grid_cols];
  double result[grid_rows][grid_cols];
  /* BUG FIX: the original cleared `power` and `result` with sizeof(temp);
   * the sizes happen to match here, but each array must use its own
   * sizeof so the code stays correct if the declarations ever diverge. */
  memset(temp, 0, sizeof(temp));
  memset(power, 0, sizeof(power));
  memset(result, 0, sizeof(result));
  /* read initial temperatures and input power */
  tfile = argv[5];
  pfile = argv[6];
  read_input(grid_rows, grid_cols, temp, tfile);
  read_input(grid_rows, grid_cols, power, pfile);
  /* Start timer. */
  timer_start();
  /* Cheat the compiler to limit the scope of optimisation
   * (argv[5] is never NULL here, but the compiler cannot prove it). */
  if(argv[5] == 0) {
    memset(temp, 0, sizeof(temp));
    memset(power, 0, sizeof(power));
    memset(result, 0, sizeof(result));
  }
  /* Main computation */
  compute_tran_temp(grid_rows, grid_cols, result, sim_time, temp, power);
  /* Cheat the compiler to limit the scope of optimisation */
  if(argv[5] == 0) {
    for(i = 0; i < grid_rows; i++) {
      for(j = 0; j < grid_cols; j++) {
        fprintf(stdout, "%d\t%g\n", (i * grid_cols) + j, temp[i][j]);
      }
    }
  }
  /* Stop and print timer. */
  timer_stop_display();
  /* output results */
#ifdef VERBOSE
  fprintf(stdout, "Final Temperatures:\n");
#endif
#ifdef OUTPUT
  for(i = 0; i < grid_rows; i++)
    for(j = 0; j < grid_cols; j++) {
      fprintf(stdout, "%d\t%g\n", (i * grid_cols) + j, temp[i][j]);
    }
#endif
  return 0;
}
|
UniOP.h | #ifndef UNIOP_H_
#define UNIOP_H_
/*
* UniOP.h:
* a simple feed forward neural operation, unary input.
*
* Created on: Apr 22, 2017
* Author: mszhang
*/
#include "Param.h"
#include "MyLib.h"
#include "Node.h"
#include "Graph.h"
#include "ModelUpdate.h"
// Learnable parameters of one affine transform y = W * x (+ b).
// The bias term is optional and controlled by bUseB.
class UniParams {
  public:
    Param W;     // weight matrix, nOSize x nISize
    Param b;     // bias vector, nOSize x 1 (only valid when bUseB)
    bool bUseB;  // whether the bias term is active

  public:
    UniParams() : bUseB(true) {
    }

    // Register the trainable tensors with the optimizer.
    inline void exportAdaParams(ModelUpdate& ada) {
        ada.addParam(&W);
        if (!bUseB) {
            return;
        }
        ada.addParam(&b);
    }

    // Allocate and initialize W (and b when useB is set).
    inline void initial(int nOSize, int nISize, bool useB = true) {
        bUseB = useB;
        W.initial(nOSize, nISize);
        if (bUseB) {
            b.initial(nOSize, 1);
        }
    }

    // Serialization: the bias flag is written first so load() knows
    // whether a bias block follows W.
    inline void save(std::ofstream &os) const {
        os << bUseB << std::endl;
        W.save(os);
        if (bUseB) {
            b.save(os);
        }
    }

    inline void load(std::ifstream &is) {
        is >> bUseB;
        W.load(is);
        if (bUseB) {
            b.load(is);
        }
    }
};
// non-linear feed-forward node: val = activate(W * in + b)
// input nodes should be specified by forward function
// for input variables, we exploit column vector,
// which means a concrete input vector x_i is represented by x(0, i), x(1, i), ..., x(n, i)
class UniNode : public Node {
  public:
    PNode in;                                      // single input node (borrowed, not owned)
    UniParams* param;                              // shared affine parameters (W, optional b)
    dtype(*activate)(const dtype&);   // activation function
    dtype(*derivate)(const dtype&, const dtype&);   // derivation function of activation function
    Tensor1D ty, lty;                              // pre-activation value and its gradient buffer
  public:
    UniNode() : Node() {
        in = NULL;
        activate = ftanh;    // defaults to tanh; override via setFunctions()
        derivate = dtanh;
        param = NULL;
        node_type = "uni";
    }

    ~UniNode() {
        in = NULL;
    }

    // Allocate the node buffers for dimension ndim.
    inline void init(int ndim, dtype dropout) {
        Node::init(ndim, dropout);
        ty.init(ndim);
        lty.init(ndim);
    }

    inline void setParam(UniParams* paramInit) {
        param = paramInit;
    }

    // Reset per-example state so the node can be reused across graphs.
    inline void clearValue() {
        Node::clearValue();
        in = NULL;
        ty = 0;
        lty = 0;
    }

    // define the activate function and its derivation form
    inline void setFunctions(dtype(*f)(const dtype&), dtype(*f_deri)(const dtype&, const dtype&)) {
        activate = f;
        derivate = f_deri;
    }

  public:
    // Wire this node to input x and register it in the computation graph.
    void forward(Graph *cg, PNode x) {
        in = x;
        degree = 0;
        in->addParent(this);
        cg->addNode(this);
    }

  public:
    // Forward pass: ty = W * in (+ b), val = activate(ty).
    inline void compute() {
        ty.mat() = param->W.val.mat() * in->val.mat();
        if (param->bUseB) {
            ty.vec() += param->b.val.vec();
        }
        // NOTE(review): std::ptr_fun was deprecated in C++11 and removed in
        // C++17 -- replace with a lambda (or pass the pointer directly) when
        // the toolchain is upgraded.
        val.vec() = ty.vec().unaryExpr(ptr_fun(activate));
    }

    // Backward pass: lty = loss * f'(ty, val); accumulate gradients into
    // W (and b), and propagate W^T * lty into the input's loss.
    inline void backward() {
        lty.vec() = loss.vec() * ty.vec().binaryExpr(val.vec(), ptr_fun(derivate));
        param->W.grad.mat() += lty.mat() * in->val.tmat();
        if (param->bUseB) {
            param->b.grad.vec() += lty.vec();
        }
        in->loss.mat() += param->W.val.mat().transpose() * lty.mat();
    }

  public:
    inline PExecute generate(bool bTrain, dtype cur_drop_factor);

    // better to rewrite for deep understanding
    // Two nodes are batchable only if they share parameters and functions.
    inline bool typeEqual(PNode other) {
        bool result = Node::typeEqual(other);
        if (!result) return false;
        UniNode* conv_other = (UniNode*)other;
        if (param != conv_other->param) {
            return false;
        }
        if (activate != conv_other->activate || derivate != conv_other->derivate) {
            return false;
        }
        return true;
    }
};
// linear feed-forward node: val = W * in (+ b), no activation
// (the original header comment said "non-linear", copy-pasted from UniNode)
// input nodes should be specified by forward function
// for input variables, we exploit column vector,
// which means a concrete input vector x_i is represented by x(0, i), x(1, i), ..., x(n, i)
class LinearUniNode : public Node {
  public:
    PNode in;           // single input node (borrowed, not owned)
    UniParams* param;   // shared affine parameters (W, optional b)
  public:
    LinearUniNode() : Node() {
        in = NULL;
        param = NULL;
        node_type = "linear_uni";
    }

    inline void setParam(UniParams* paramInit) {
        param = paramInit;
    }

    // Reset per-example state so the node can be reused across graphs.
    inline void clearValue() {
        Node::clearValue();
        in = NULL;
    }

  public:
    // Wire this node to input x and register it in the computation graph.
    void forward(Graph *cg, PNode x) {
        in = x;
        degree = 0;
        in->addParent(this);
        cg->addNode(this);
    }

  public:
    // Forward pass: val = W * in (+ b).
    inline void compute() {
        val.mat() = param->W.val.mat() * in->val.mat();
        if (param->bUseB) {
            val.vec() += param->b.val.vec();
        }
    }

    // Backward pass: accumulate W (and b) gradients; propagate W^T * loss.
    inline void backward() {
        param->W.grad.mat() += loss.mat() * in->val.tmat();
        if (param->bUseB) {
            param->b.grad.vec() += loss.vec();
        }
        in->loss.mat() += param->W.val.mat().transpose() * loss.mat();
    }

  public:
    inline PExecute generate(bool bTrain, dtype cur_drop_factor);

    // better to rewrite for deep understanding
    // Two nodes are batchable only if they share the same parameters.
    inline bool typeEqual(PNode other) {
        bool result = Node::typeEqual(other);
        if (!result) return false;
        LinearUniNode* conv_other = (LinearUniNode*)other;
        if (param != conv_other->param) {
            return false;
        }
        return true;
    }
};
// linear feed-forward node: val = W * in, no bias and no activation
// (the original header comment said "non-linear", copy-pasted from UniNode)
// input nodes should be specified by forward function
// for input variables, we exploit column vector,
// which means a concrete input vector x_i is represented by x(0, i), x(1, i), ..., x(n, i)
class LinearNode : public Node {
  public:
    PNode in;           // single input node (borrowed, not owned)
    UniParams* param;   // only param->W is used; the bias is ignored here
  public:
    LinearNode() : Node() {
        in = NULL;
        param = NULL;
        node_type = "linear";
    }

    inline void setParam(UniParams* paramInit) {
        param = paramInit;
    }

    // Reset per-example state so the node can be reused across graphs.
    inline void clearValue() {
        Node::clearValue();
        in = NULL;
    }

  public:
    // Wire this node to input x and register it in the computation graph.
    void forward(Graph *cg, PNode x) {
        in = x;
        degree = 0;
        in->addParent(this);
        cg->addNode(this);
    }

  public:
    // Forward pass: val = W * in.
    inline void compute() {
        val.mat() = param->W.val.mat() * in->val.mat();
    }

    // Backward pass: accumulate the W gradient; propagate W^T * loss.
    inline void backward() {
        param->W.grad.mat() += loss.mat() * in->val.tmat();
        in->loss.mat() += param->W.val.mat().transpose() * loss.mat();
    }

  public:
    inline PExecute generate(bool bTrain, dtype cur_drop_factor);

    // better to rewrite for deep understanding
    // Two nodes are batchable only if they share the same parameters.
    inline bool typeEqual(PNode other) {
        bool result = Node::typeEqual(other);
        if (!result) return false;
        LinearNode* conv_other = (LinearNode*)other;
        if (param != conv_other->param) {
            return false;
        }
        return true;
    }
};
// Batched executor for UniNode: runs each node's forward computation plus
// dropout, and the mirrored backward pass, over the collected batch.
class UniExecute : public Execute {
  public:
    bool bTrain;  // training mode flag forwarded to dropout

  public:
    inline void forward() {
        const int total = (int)batch.size();
        for (int k = 0; k < total; ++k) {
            batch[k]->compute();
            batch[k]->forward_drop(bTrain, drop_factor);
        }
    }

    inline void backward() {
        const int total = (int)batch.size();
        for (int k = 0; k < total; ++k) {
            batch[k]->backward_drop();
            batch[k]->backward();
        }
    }
};
// Create a single-node executor seeded with this UniNode.
inline PExecute UniNode::generate(bool bTrain, dtype cur_drop_factor) {
    UniExecute* runner = new UniExecute();
    runner->bTrain = bTrain;
    runner->drop_factor = cur_drop_factor;
    runner->batch.push_back(this);
    return runner;
}
// Batched executor for LinearUniNode: forward computation plus dropout,
// and the mirrored backward pass, over the collected batch.
class LinearUniExecute : public Execute {
  public:
    bool bTrain;  // training mode flag forwarded to dropout

  public:
    inline void forward() {
        const int total = (int)batch.size();
        for (int k = 0; k < total; ++k) {
            batch[k]->compute();
            batch[k]->forward_drop(bTrain, drop_factor);
        }
    }

    inline void backward() {
        const int total = (int)batch.size();
        for (int k = 0; k < total; ++k) {
            batch[k]->backward_drop();
            batch[k]->backward();
        }
    }
};
// Create a single-node executor seeded with this LinearUniNode.
inline PExecute LinearUniNode::generate(bool bTrain, dtype cur_drop_factor) {
    LinearUniExecute* runner = new LinearUniExecute();
    runner->bTrain = bTrain;
    runner->drop_factor = cur_drop_factor;
    runner->batch.push_back(this);
    return runner;
}
// Batched executor for LinearNode: forward computation plus dropout,
// and the mirrored backward pass, over the collected batch.
class LinearExecute : public Execute {
  public:
    bool bTrain;  // training mode flag forwarded to dropout

  public:
    inline void forward() {
        const int total = (int)batch.size();
        for (int k = 0; k < total; ++k) {
            batch[k]->compute();
            batch[k]->forward_drop(bTrain, drop_factor);
        }
    }

    inline void backward() {
        const int total = (int)batch.size();
        for (int k = 0; k < total; ++k) {
            batch[k]->backward_drop();
            batch[k]->backward();
        }
    }
};
// Create a single-node executor seeded with this LinearNode.
inline PExecute LinearNode::generate(bool bTrain, dtype cur_drop_factor) {
    LinearExecute* runner = new LinearExecute();
    runner->bTrain = bTrain;
    runner->drop_factor = cur_drop_factor;
    runner->batch.push_back(this);
    return runner;
}
#endif /* UNIOP_H_ */
|
convolution_winograd_transform_packn.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Winograd F(6x6, 3x3) input transform, packn layout, RISC-V Vector.
// The input is split into 8x8 tiles that overlap with stride 6; each tile is
// multiplied by the 8x8 input-transform matrix on rows and then columns,
// and the 64 resulting coefficients are scattered into bottom_blob_tm so
// that all tiles' k-th coefficients are contiguous.
static void conv3x3s1_winograd63_transform_input_packn_rvv(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    // packn = number of 32-bit lanes per vector register (vlenb bytes / 4)
    const int packn = csrr_vlenb() / 4;
    const word_type vl = vsetvl_e32m1(packn);
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;
    // 8x8 tiles advancing by 6 pixels (output tile size) per step
    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;
    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };
    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25
    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)
    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);
        // NOTE c99 variable length array
        // staging buffer: row-transformed tile, indexed [out coeff][in row][lane]
        float tmp[8][8][packn];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 6) + (j * 6) * packn;
                // pass 1: transform each of the 8 tile rows into tmp
                for (int m = 0; m < 8; m++)
                {
                    vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
                    vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
                    vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
                    vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
                    vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl);
                    vfloat32m1_t _r05 = vle32_v_f32m1(r0 + packn * 5, vl);
                    vfloat32m1_t _r06 = vle32_v_f32m1(r0 + packn * 6, vl);
                    vfloat32m1_t _r07 = vle32_v_f32m1(r0 + packn * 7, vl);
                    vfloat32m1_t _tmp0m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r00, _r06, vl), 5.25f, vfsub_vv_f32m1(_r04, _r02, vl), vl);
                    vfloat32m1_t _tmp7m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r07, _r01, vl), 5.25f, vfsub_vv_f32m1(_r03, _r05, vl), vl);
                    vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
                    vse32_v_f32m1(tmp[7][m], _tmp7m, vl);
                    vfloat32m1_t _tmp12a = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r02, _r06, vl), -4.25f, _r04, vl);
                    vfloat32m1_t _tmp12b = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r01, _r05, vl), -4.25f, _r03, vl);
                    vfloat32m1_t _tmp1m = vfadd_vv_f32m1(_tmp12a, _tmp12b, vl);
                    vfloat32m1_t _tmp2m = vfsub_vv_f32m1(_tmp12a, _tmp12b, vl);
                    vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
                    vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
                    vfloat32m1_t _tmp34a = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_r06, 0.25f, _r02, vl), -1.25f, _r04, vl);
                    vfloat32m1_t _tmp34b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_r01, 0.5f, vl), -2.5f, _r03, vl), 2.f, _r05, vl);
                    vfloat32m1_t _tmp3m = vfadd_vv_f32m1(_tmp34a, _tmp34b, vl);
                    vfloat32m1_t _tmp4m = vfsub_vv_f32m1(_tmp34a, _tmp34b, vl);
                    vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
                    vse32_v_f32m1(tmp[4][m], _tmp4m, vl);
                    vfloat32m1_t _tmp56a = vfmacc_vf_f32m1(_r06, 4.f, vfmacc_vf_f32m1(_r02, -1.25f, _r04, vl), vl);
                    vfloat32m1_t _tmp56b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_r01, 2.f, vl), -2.5f, _r03, vl), 0.5f, _r05, vl);
                    vfloat32m1_t _tmp5m = vfadd_vv_f32m1(_tmp56a, _tmp56b, vl);
                    vfloat32m1_t _tmp6m = vfsub_vv_f32m1(_tmp56a, _tmp56b, vl);
                    vse32_v_f32m1(tmp[5][m], _tmp5m, vl);
                    vse32_v_f32m1(tmp[6][m], _tmp6m, vl);
                    r0 += w * packn;
                }
                // pass 2: transform the columns of tmp and scatter the 64
                // coefficients into img0_tm (coefficient-major layout)
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * packn;
                float* r0_tm_1 = r0_tm_0 + tiles * packn;
                float* r0_tm_2 = r0_tm_0 + tiles * packn * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * packn * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * packn * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * packn * 5;
                float* r0_tm_6 = r0_tm_0 + tiles * packn * 6;
                float* r0_tm_7 = r0_tm_0 + tiles * packn * 7;
                for (int m = 0; m < 8; m++)
                {
                    vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
                    vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
                    vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
                    vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
                    vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
                    vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
                    vfloat32m1_t _tmp06 = vle32_v_f32m1(tmp[m][6], vl);
                    vfloat32m1_t _tmp07 = vle32_v_f32m1(tmp[m][7], vl);
                    vfloat32m1_t _r0tm0 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp00, _tmp06, vl), 5.25f, vfsub_vv_f32m1(_tmp04, _tmp02, vl), vl);
                    vfloat32m1_t _r0tm7 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp07, _tmp01, vl), 5.25f, vfsub_vv_f32m1(_tmp03, _tmp05, vl), vl);
                    vfloat32m1_t _tmp12a = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp02, _tmp06, vl), -4.25f, _tmp04, vl);
                    vfloat32m1_t _tmp12b = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp01, _tmp05, vl), -4.25f, _tmp03, vl);
                    vfloat32m1_t _r0tm1 = vfadd_vv_f32m1(_tmp12a, _tmp12b, vl);
                    vfloat32m1_t _r0tm2 = vfsub_vv_f32m1(_tmp12a, _tmp12b, vl);
                    vfloat32m1_t _tmp34a = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp06, 0.25f, _tmp02, vl), -1.25f, _tmp04, vl);
                    vfloat32m1_t _tmp34b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_tmp01, 0.5f, vl), -2.5f, _tmp03, vl), 2.f, _tmp05, vl);
                    vfloat32m1_t _r0tm3 = vfadd_vv_f32m1(_tmp34a, _tmp34b, vl);
                    vfloat32m1_t _r0tm4 = vfsub_vv_f32m1(_tmp34a, _tmp34b, vl);
                    vfloat32m1_t _tmp56a = vfmacc_vf_f32m1(_tmp06, 4.f, vfmacc_vf_f32m1(_tmp02, -1.25f, _tmp04, vl), vl);
                    vfloat32m1_t _tmp56b = vfmacc_vf_f32m1(vfmacc_vf_f32m1(vfmul_vf_f32m1(_tmp01, 2.f, vl), -2.5f, _tmp03, vl), 0.5f, _tmp05, vl);
                    vfloat32m1_t _r0tm5 = vfadd_vv_f32m1(_tmp56a, _tmp56b, vl);
                    vfloat32m1_t _r0tm6 = vfsub_vv_f32m1(_tmp56a, _tmp56b, vl);
                    vse32_v_f32m1(r0_tm_0, _r0tm0, vl);
                    vse32_v_f32m1(r0_tm_1, _r0tm1, vl);
                    vse32_v_f32m1(r0_tm_2, _r0tm2, vl);
                    vse32_v_f32m1(r0_tm_3, _r0tm3, vl);
                    vse32_v_f32m1(r0_tm_4, _r0tm4, vl);
                    vse32_v_f32m1(r0_tm_5, _r0tm5, vl);
                    vse32_v_f32m1(r0_tm_6, _r0tm6, vl);
                    vse32_v_f32m1(r0_tm_7, _r0tm7, vl);
                    r0_tm_0 += tiles * packn * 8;
                    r0_tm_1 += tiles * packn * 8;
                    r0_tm_2 += tiles * packn * 8;
                    r0_tm_3 += tiles * packn * 8;
                    r0_tm_4 += tiles * packn * 8;
                    r0_tm_5 += tiles * packn * 8;
                    r0_tm_6 += tiles * packn * 8;
                    r0_tm_7 += tiles * packn * 8;
                }
            }
        }
    }
}
// Winograd F(6x6, 3x3) output transform, packn layout, RISC-V Vector.
// Each 8x8 block of transformed coefficients is reduced by the 6x8 output
// matrix on rows and then columns to yield one 6x6 output tile, with the
// per-channel bias added during the final column pass.
static void conv3x3s1_winograd63_transform_output_packn_rvv(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    // packn = number of 32-bit lanes per vector register (vlenb bytes / 4)
    const int packn = csrr_vlenb() / 4;
    const word_type vl = vsetvl_e32m1(packn);
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;
    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;
    const float* biasptr = bias;
    // const float otm[6][8] = {
    //     {1.0f,  1.0f,   1.0f,   1.0f,   1.0f,  32.0f, 32.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,   2.0f,  -2.0f,  16.0f,-16.0f, 0.0f},
    //     {0.0f,  1.0f,   1.0f,   4.0f,   4.0f,   8.0f,  8.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,   8.0f,  -8.0f,   4.0f, -4.0f, 0.0f},
    //     {0.0f,  1.0f,   1.0f,  16.0f,  16.0f,   2.0f,  2.0f, 0.0f},
    //     {0.0f,  1.0f,  -1.0f,  32.0f, -32.0f,   1.0f, -1.0f, 1.0f}
    // };
    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);
        // per-channel bias vector, or zeros when no bias is given
        vfloat32m1_t _bias0 = biasptr ? vle32_v_f32m1(biasptr + p * packn, vl) : vfmv_v_f_f32m1(0.f, vl);
        // NOTE c99 variable length array
        // staging buffer: row-reduced tile, indexed [out row][in coeff][lane]
        float tmp[6][8][packn];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * packn;
                const float* output0_tm_1 = output0_tm_0 + tiles * packn;
                const float* output0_tm_2 = output0_tm_0 + tiles * packn * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * packn * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * packn * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * packn * 5;
                const float* output0_tm_6 = output0_tm_0 + tiles * packn * 6;
                const float* output0_tm_7 = output0_tm_0 + tiles * packn * 7;
                float* output0 = out0.row(i * 6) + (j * 6) * packn;
                // pass 1: reduce the 8 coefficient rows down to 6 rows in tmp
                for (int m = 0; m < 8; m++)
                {
                    vfloat32m1_t _out0tm0 = vle32_v_f32m1(output0_tm_0, vl);
                    vfloat32m1_t _out0tm1 = vle32_v_f32m1(output0_tm_1, vl);
                    vfloat32m1_t _out0tm2 = vle32_v_f32m1(output0_tm_2, vl);
                    vfloat32m1_t _out0tm3 = vle32_v_f32m1(output0_tm_3, vl);
                    vfloat32m1_t _out0tm4 = vle32_v_f32m1(output0_tm_4, vl);
                    vfloat32m1_t _out0tm5 = vle32_v_f32m1(output0_tm_5, vl);
                    vfloat32m1_t _out0tm6 = vle32_v_f32m1(output0_tm_6, vl);
                    vfloat32m1_t _out0tm7 = vle32_v_f32m1(output0_tm_7, vl);
                    vfloat32m1_t _tmp024a = vfadd_vv_f32m1(_out0tm1, _out0tm2, vl);
                    vfloat32m1_t _tmp135a = vfsub_vv_f32m1(_out0tm1, _out0tm2, vl);
                    vfloat32m1_t _tmp024b = vfadd_vv_f32m1(_out0tm3, _out0tm4, vl);
                    vfloat32m1_t _tmp135b = vfsub_vv_f32m1(_out0tm3, _out0tm4, vl);
                    vfloat32m1_t _tmp024c = vfadd_vv_f32m1(_out0tm5, _out0tm6, vl);
                    vfloat32m1_t _tmp135c = vfsub_vv_f32m1(_out0tm5, _out0tm6, vl);
                    vfloat32m1_t _tmp0m = vfadd_vv_f32m1(vfadd_vv_f32m1(_out0tm0, _tmp024a, vl), vfmacc_vf_f32m1(_tmp024b, 32.f, _tmp024c, vl), vl);
                    vfloat32m1_t _tmp2m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl);
                    vfloat32m1_t _tmp4m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl);
                    vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
                    vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
                    vse32_v_f32m1(tmp[4][m], _tmp4m, vl);
                    vfloat32m1_t _tmp1m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl);
                    vfloat32m1_t _tmp3m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl);
                    vfloat32m1_t _tmp5m = vfadd_vv_f32m1(vfadd_vv_f32m1(_out0tm7, _tmp135a, vl), vfmacc_vf_f32m1(_tmp135c, 32.f, _tmp135b, vl), vl);
                    vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
                    vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
                    vse32_v_f32m1(tmp[5][m], _tmp5m, vl);
                    output0_tm_0 += tiles * packn * 8;
                    output0_tm_1 += tiles * packn * 8;
                    output0_tm_2 += tiles * packn * 8;
                    output0_tm_3 += tiles * packn * 8;
                    output0_tm_4 += tiles * packn * 8;
                    output0_tm_5 += tiles * packn * 8;
                    output0_tm_6 += tiles * packn * 8;
                    output0_tm_7 += tiles * packn * 8;
                }
                // pass 2: reduce the 8 columns to 6 outputs, add bias, and
                // store the finished 6x6 tile
                for (int m = 0; m < 6; m++)
                {
                    vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
                    vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
                    vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
                    vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
                    vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
                    vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
                    vfloat32m1_t _tmp06 = vle32_v_f32m1(tmp[m][6], vl);
                    vfloat32m1_t _tmp07 = vle32_v_f32m1(tmp[m][7], vl);
                    vfloat32m1_t _tmp024a = vfadd_vv_f32m1(_tmp01, _tmp02, vl);
                    vfloat32m1_t _tmp135a = vfsub_vv_f32m1(_tmp01, _tmp02, vl);
                    vfloat32m1_t _tmp024b = vfadd_vv_f32m1(_tmp03, _tmp04, vl);
                    vfloat32m1_t _tmp135b = vfsub_vv_f32m1(_tmp03, _tmp04, vl);
                    vfloat32m1_t _tmp024c = vfadd_vv_f32m1(_tmp05, _tmp06, vl);
                    vfloat32m1_t _tmp135c = vfsub_vv_f32m1(_tmp05, _tmp06, vl);
                    vfloat32m1_t _out00 = vfadd_vv_f32m1(_bias0, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp00, _tmp024a, vl), vfmacc_vf_f32m1(_tmp024b, 32.f, _tmp024c, vl), vl), vl);
                    vfloat32m1_t _out02 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 4.f, _tmp024b, vl), 8.f, _tmp024c, vl), vl);
                    vfloat32m1_t _out04 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp024a, 16.f, _tmp024b, vl), 2.f, _tmp024c, vl), vl);
                    vse32_v_f32m1(output0, _out00, vl);
                    vse32_v_f32m1(output0 + packn * 2, _out02, vl);
                    vse32_v_f32m1(output0 + packn * 4, _out04, vl);
                    vfloat32m1_t _out01 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 2.f, _tmp135b, vl), 16.f, _tmp135c, vl), vl);
                    vfloat32m1_t _out03 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp135a, 8.f, _tmp135b, vl), 4.f, _tmp135c, vl), vl);
                    vfloat32m1_t _out05 = vfadd_vv_f32m1(_bias0, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp07, _tmp135a, vl), vfmacc_vf_f32m1(_tmp135c, 32.f, _tmp135b, vl), vl), vl);
                    vse32_v_f32m1(output0 + packn, _out01, vl);
                    vse32_v_f32m1(output0 + packn * 3, _out03, vl);
                    vse32_v_f32m1(output0 + packn * 5, _out05, vl);
                    output0 += outw * packn;
                }
            }
        }
    }
}
// Winograd F(4x4, 3x3) input transform for packn-layout fp32 data using RISC-V
// Vector intrinsics: computes B^T * d * B for every overlapping 6x6 input tile
// (tiles advance by 4, the output tile size) and scatters the 36 transformed
// coefficients into bottom_blob_tm, grouped by coefficient index so the
// subsequent GEMM can stream them contiguously.
// Layout assumption: bottom_blob is packn-packed, i.e. each "pixel" is a run of
// packn floats (one SIMD lane per channel of the pack).
static void conv3x3s1_winograd43_transform_input_packn_rvv(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int packn = csrr_vlenb() / 4;      // fp32 lanes per vector register (VLEN/32)
    const word_type vl = vsetvl_e32m1(packn);
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;
    // Number of 6x6 input tiles with stride 4 (output tile is 4x4).
    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;
    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
    // };
    // 0 = 4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 = 4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 = 2 * (r01 - r03) + r04 - r02
    // 5 = 4 * r01 - 5 * r03 + r05
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);
        // NOTE c99 variable length array
        float tmp[6][6][packn];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 4) + (j * 4) * packn;
                // Pass 1: transform each of the 6 rows of the tile (B^T * d),
                // staging the result in tmp[][][] transposed for pass 2.
                for (int m = 0; m < 6; m++)
                {
                    vfloat32m1_t _r00 = vle32_v_f32m1(r0, vl);
                    vfloat32m1_t _r01 = vle32_v_f32m1(r0 + packn, vl);
                    vfloat32m1_t _r02 = vle32_v_f32m1(r0 + packn * 2, vl);
                    vfloat32m1_t _r03 = vle32_v_f32m1(r0 + packn * 3, vl);
                    vfloat32m1_t _r04 = vle32_v_f32m1(r0 + packn * 4, vl);
                    vfloat32m1_t _r05 = vle32_v_f32m1(r0 + packn * 5, vl);
                    vfloat32m1_t _tmp0m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_r04, 4.f, _r00, vl), -5.f, _r02, vl);
                    vfloat32m1_t _tmp1m = vfmacc_vf_f32m1(vfadd_vv_f32m1(_r04, _r03, vl), -4.f, vfadd_vv_f32m1(_r01, _r02, vl), vl);
                    vfloat32m1_t _tmp2m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r04, _r03, vl), 4.f, vfsub_vv_f32m1(_r01, _r02, vl), vl);
                    vfloat32m1_t _tmp3m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r04, _r02, vl), -2.f, vfsub_vv_f32m1(_r01, _r03, vl), vl);
                    vfloat32m1_t _tmp4m = vfmacc_vf_f32m1(vfsub_vv_f32m1(_r04, _r02, vl), 2.f, vfsub_vv_f32m1(_r01, _r03, vl), vl);
                    vfloat32m1_t _tmp5m = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_r05, 4.f, _r01, vl), -5.f, _r03, vl);
                    vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
                    vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
                    vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
                    vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
                    vse32_v_f32m1(tmp[4][m], _tmp4m, vl);
                    vse32_v_f32m1(tmp[5][m], _tmp5m, vl);
                    r0 += w * packn;
                }
                // Destination pointers: coefficient planes are separated by
                // tiles*packn floats; each tile occupies one packn slot.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * packn;
                float* r0_tm_1 = r0_tm_0 + tiles * packn;
                float* r0_tm_2 = r0_tm_0 + tiles * packn * 2;
                float* r0_tm_3 = r0_tm_0 + tiles * packn * 3;
                float* r0_tm_4 = r0_tm_0 + tiles * packn * 4;
                float* r0_tm_5 = r0_tm_0 + tiles * packn * 5;
                // Pass 2: apply the same transform along columns ((B^T d) B)
                // and write the 36 coefficients out.
                for (int m = 0; m < 6; m++)
                {
                    vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
                    vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
                    vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
                    vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
                    vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
                    vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
                    vfloat32m1_t _r0tm0 = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp04, 4.f, _tmp00, vl), -5.f, _tmp02, vl);
                    vfloat32m1_t _r0tm1 = vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp04, _tmp03, vl), -4.f, vfadd_vv_f32m1(_tmp01, _tmp02, vl), vl);
                    vfloat32m1_t _r0tm2 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp04, _tmp03, vl), 4.f, vfsub_vv_f32m1(_tmp01, _tmp02, vl), vl);
                    vfloat32m1_t _r0tm3 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp04, _tmp02, vl), -2.f, vfsub_vv_f32m1(_tmp01, _tmp03, vl), vl);
                    vfloat32m1_t _r0tm4 = vfmacc_vf_f32m1(vfsub_vv_f32m1(_tmp04, _tmp02, vl), 2.f, vfsub_vv_f32m1(_tmp01, _tmp03, vl), vl);
                    vfloat32m1_t _r0tm5 = vfmacc_vf_f32m1(vfmacc_vf_f32m1(_tmp05, 4.f, _tmp01, vl), -5.f, _tmp03, vl);
                    vse32_v_f32m1(r0_tm_0, _r0tm0, vl);
                    vse32_v_f32m1(r0_tm_1, _r0tm1, vl);
                    vse32_v_f32m1(r0_tm_2, _r0tm2, vl);
                    vse32_v_f32m1(r0_tm_3, _r0tm3, vl);
                    vse32_v_f32m1(r0_tm_4, _r0tm4, vl);
                    vse32_v_f32m1(r0_tm_5, _r0tm5, vl);
                    // Advance to the next group of 6 coefficient planes.
                    r0_tm_0 += tiles * packn * 6;
                    r0_tm_1 += tiles * packn * 6;
                    r0_tm_2 += tiles * packn * 6;
                    r0_tm_3 += tiles * packn * 6;
                    r0_tm_4 += tiles * packn * 6;
                    r0_tm_5 += tiles * packn * 6;
                }
            }
        }
    }
}
// Winograd F(4x4, 3x3) output transform for packn-layout fp32 data (RVV):
// computes A^T * M * A for every 6x6 transformed tile, adds the per-channel
// bias, and writes the resulting 4x4 spatial tile into top_blob.
// top_blob_tm holds 36 coefficient planes, each tiles*packn floats apart
// (the layout produced by the matching input transform / GEMM stage).
static void conv3x3s1_winograd43_transform_output_packn_rvv(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int packn = csrr_vlenb() / 4;      // fp32 lanes per vector register (VLEN/32)
    const word_type vl = vsetvl_e32m1(packn);
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;
    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;
    const float* biasptr = bias;            // may be empty -> null pointer
    // const float otm[4][6] = {
    //     {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };
    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 = (r01 - r02) + (r03 - r04) * 2
    // 2 = (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);
        // Broadcast the packn bias values once per channel (zero if no bias).
        vfloat32m1_t _bias0 = biasptr ? vle32_v_f32m1(biasptr + p * packn, vl) : vfmv_v_f_f32m1(0.f, vl);
        // NOTE variable length array
        float tmp[4][6][packn];
        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * packn;
                const float* output0_tm_1 = output0_tm_0 + tiles * packn;
                const float* output0_tm_2 = output0_tm_0 + tiles * packn * 2;
                const float* output0_tm_3 = output0_tm_0 + tiles * packn * 3;
                const float* output0_tm_4 = output0_tm_0 + tiles * packn * 4;
                const float* output0_tm_5 = output0_tm_0 + tiles * packn * 5;
                float* output0 = out0.row(i * 4) + (j * 4) * packn;
                // Pass 1: collapse the 6 rows of coefficients to 4 (A^T * M),
                // staged transposed in tmp[][][] for pass 2.
                for (int m = 0; m < 6; m++)
                {
                    vfloat32m1_t _out0tm0 = vle32_v_f32m1(output0_tm_0, vl);
                    vfloat32m1_t _out0tm1 = vle32_v_f32m1(output0_tm_1, vl);
                    vfloat32m1_t _out0tm2 = vle32_v_f32m1(output0_tm_2, vl);
                    vfloat32m1_t _out0tm3 = vle32_v_f32m1(output0_tm_3, vl);
                    vfloat32m1_t _out0tm4 = vle32_v_f32m1(output0_tm_4, vl);
                    vfloat32m1_t _out0tm5 = vle32_v_f32m1(output0_tm_5, vl);
                    // Shared subexpressions of the 4 output rows.
                    vfloat32m1_t _tmp02a = vfadd_vv_f32m1(_out0tm1, _out0tm2, vl);
                    vfloat32m1_t _tmp13a = vfsub_vv_f32m1(_out0tm1, _out0tm2, vl);
                    vfloat32m1_t _tmp02b = vfadd_vv_f32m1(_out0tm3, _out0tm4, vl);
                    vfloat32m1_t _tmp13b = vfsub_vv_f32m1(_out0tm3, _out0tm4, vl);
                    vfloat32m1_t _tmp0m = vfadd_vv_f32m1(vfadd_vv_f32m1(_out0tm0, _tmp02a, vl), _tmp02b, vl);
                    vfloat32m1_t _tmp1m = vfmacc_vf_f32m1(_tmp13a, 2.f, _tmp13b, vl);
                    vfloat32m1_t _tmp2m = vfmacc_vf_f32m1(_tmp02a, 4.f, _tmp02b, vl);
                    vfloat32m1_t _tmp3m = vfmacc_vf_f32m1(vfadd_vv_f32m1(_out0tm5, _tmp13a, vl), 8.f, _tmp13b, vl);
                    vse32_v_f32m1(tmp[0][m], _tmp0m, vl);
                    vse32_v_f32m1(tmp[1][m], _tmp1m, vl);
                    vse32_v_f32m1(tmp[2][m], _tmp2m, vl);
                    vse32_v_f32m1(tmp[3][m], _tmp3m, vl);
                    output0_tm_0 += tiles * packn * 6;
                    output0_tm_1 += tiles * packn * 6;
                    output0_tm_2 += tiles * packn * 6;
                    output0_tm_3 += tiles * packn * 6;
                    output0_tm_4 += tiles * packn * 6;
                    output0_tm_5 += tiles * packn * 6;
                }
                // Pass 2: collapse columns, add bias, store the 4x4 tile.
                for (int m = 0; m < 4; m++)
                {
                    vfloat32m1_t _tmp00 = vle32_v_f32m1(tmp[m][0], vl);
                    vfloat32m1_t _tmp01 = vle32_v_f32m1(tmp[m][1], vl);
                    vfloat32m1_t _tmp02 = vle32_v_f32m1(tmp[m][2], vl);
                    vfloat32m1_t _tmp03 = vle32_v_f32m1(tmp[m][3], vl);
                    vfloat32m1_t _tmp04 = vle32_v_f32m1(tmp[m][4], vl);
                    vfloat32m1_t _tmp05 = vle32_v_f32m1(tmp[m][5], vl);
                    vfloat32m1_t _tmp02a = vfadd_vv_f32m1(_tmp01, _tmp02, vl);
                    vfloat32m1_t _tmp13a = vfsub_vv_f32m1(_tmp01, _tmp02, vl);
                    vfloat32m1_t _tmp02b = vfadd_vv_f32m1(_tmp03, _tmp04, vl);
                    vfloat32m1_t _tmp13b = vfsub_vv_f32m1(_tmp03, _tmp04, vl);
                    vfloat32m1_t _out00 = vfadd_vv_f32m1(_bias0, vfadd_vv_f32m1(vfadd_vv_f32m1(_tmp00, _tmp02a, vl), _tmp02b, vl), vl);
                    vfloat32m1_t _out01 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(_tmp13a, 2.f, _tmp13b, vl), vl);
                    vfloat32m1_t _out02 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(_tmp02a, 4.f, _tmp02b, vl), vl);
                    vfloat32m1_t _out03 = vfadd_vv_f32m1(_bias0, vfmacc_vf_f32m1(vfadd_vv_f32m1(_tmp05, _tmp13a, vl), 8.f, _tmp13b, vl), vl);
                    vse32_v_f32m1(output0, _out00, vl);
                    vse32_v_f32m1(output0 + packn, _out01, vl);
                    vse32_v_f32m1(output0 + packn * 2, _out02, vl);
                    vse32_v_f32m1(output0 + packn * 3, _out03, vl);
                    output0 += outw * packn;
                }
            }
        }
    }
}
|
yolov2_forward_network_quantized.c | #include "additionally.h" // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h
#include <stdlib.h>
// softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h
#define GEMMCONV
//#define SSE41
//#undef AVX
// Saturation bounds for quantized accumulators (sign bit + magnitude bits).
#define MAX_VAL_8 (256/2 - 1)          // 7-bit (1-bit sign)
#define MAX_VAL_16 (256*256/2 - 1)     // 15-bit (1-bit sign)
// BUG FIX: the original (256*256*256*256/2 - 1) overflows int (UB; typically
// yields -1), which silently broke 32-bit clipping. Spell out 2^31 - 1.
#define MAX_VAL_32 (2147483647)        // 31-bit (1-bit sign)
// Global scale factor shared by do_quantization()/getmulti().
float scalefactor;
// Clamp `src` so that its magnitude never exceeds |max_val|; the sign of
// `src` is preserved when clipping occurs.
int max_abs(int src, int max_val)
{
    if (abs(src) <= abs(max_val)) return src;
    return (src > 0) ? max_val : -max_val;
}
// 16-bit variant of max_abs: saturate `src` to the magnitude of `max_val`,
// keeping the original sign.
short int max_abs_short(short int src, short int max_val)
{
    if (abs(src) <= abs(max_val)) return src;
    return (src > 0) ? max_val : -max_val;
}
// im2col.c
// Fetch one int8 pixel from a CHW image, honoring `pad` pixels of implicit
// zero padding around the spatial borders; coordinates outside the real
// image return 0.
int8_t im2col_get_pixel_int8(int8_t* im, int height, int width, int channels,
    int row, int col, int channel, int pad)
{
    const int r = row - pad;
    const int c = col - pad;
    const int inside = (r >= 0) && (c >= 0) && (r < height) && (c < width);
    return inside ? im[c + width * (r + height * channel)] : (int8_t)0;
}
// im2col.c
// Caffe-style im2col for int8 data: unrolls every ksize x ksize receptive
// field of each input channel into a column of data_col so convolution can
// run as a single GEMM. Out-of-image taps read as zero (see
// im2col_get_pixel_int8).
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_int8(int8_t* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, int8_t* data_col)
{
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    int c;
    for (c = 0; c < channels_col; ++c) {
        // Decompose the column index into (channel, kernel row, kernel col).
        const int w_offset = c % ksize;
        const int h_offset = (c / ksize) % ksize;
        const int c_im = c / (ksize * ksize);
        int h;
        for (h = 0; h < height_col; ++h) {
            const int im_row = h_offset + h * stride;
            int w;
            for (w = 0; w < width_col; ++w) {
                const int im_col = w_offset + w * stride;
                data_col[(c * height_col + h) * width_col + w] =
                    im2col_get_pixel_int8(data_im, height, width, channels,
                                          im_row, im_col, c_im, pad);
            }
        }
    }
}
// C += clip16(ALPHA * A * B) for row-major int8 matrices A (MxK) and B (KxN),
// accumulating each output row in 32-bit then saturating to MAX_VAL_16 before
// adding into the int16 output C.
// Fixes vs. original: calloc result cast and checked (silently no-op on OOM),
// and the deprecated `register` qualifier removed.
void gemm_nn_int8_int16(int M, int N, int K, int8_t ALPHA,
    int8_t* A, int lda,
    int8_t* B, int ldb,
    int16_t* C, int ldc)
{
    // One row of 32-bit accumulators avoids intermediate 16-bit overflow.
    int32_t* c_tmp = (int32_t*)calloc(N, sizeof(int32_t));
    if (c_tmp == NULL) return; // allocation failed: leave C untouched
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            int16_t A_PART = ALPHA * A[i * lda + k];
            for (j = 0; j < N; ++j) {
                c_tmp[j] += A_PART * B[k * ldb + j];
            }
        }
        // Saturate and flush the row, then reset the accumulators.
        for (j = 0; j < N; ++j) {
            C[i * ldc + j] += max_abs(c_tmp[j], MAX_VAL_16);
            c_tmp[j] = 0;
        }
    }
    free(c_tmp);
}
// C += clip32(ALPHA * A * B) for row-major int8 matrices A (MxK) and B (KxN),
// accumulating in 32-bit and saturating each element to MAX_VAL_32 before
// adding into the int32 output C.
// Fixes vs. original: calloc result cast and checked (silently no-op on OOM),
// and the deprecated `register` qualifier removed.
void gemm_nn_int8_int32(int M, int N, int K, int8_t ALPHA,
    int8_t* A, int lda,
    int8_t* B, int ldb,
    int32_t* C, int ldc)
{
    int32_t* c_tmp = (int32_t*)calloc(N, sizeof(int32_t));
    if (c_tmp == NULL) return; // allocation failed: leave C untouched
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            // ALPHA and A are int8, so the product always fits in int16.
            int16_t A_PART = ALPHA * A[i * lda + k];
            for (j = 0; j < N; ++j) {
                c_tmp[j] += A_PART * B[k * ldb + j];
            }
        }
        for (j = 0; j < N; ++j) {
            C[i * ldc + j] += max_abs(c_tmp[j], MAX_VAL_32);
            c_tmp[j] = 0;
        }
    }
    free(c_tmp);
}
// qsort comparator for floats: ascending order.
// BUG FIX: parameters were `void*`, which does not match qsort's required
// int (*)(const void*, const void*) signature (a constraint violation in
// ISO C and a hard error in C++).
int compare(const void* first, const void* second)
{
    const float a = *(const float*)first;
    const float b = *(const float*)second;
    if (a > b)
        return 1;
    else if (a < b)
        return -1;
    else
        return 0;
}
// Sort `arr` (length `count`) ascending in place via the shared float
// comparator.
// NOTE(review): `compare` is declared as int(void*, void*), which does not
// match qsort's required int(const void*, const void*) — most C compilers
// only warn, but this is a constraint violation; confirm the comparator's
// signature is corrected.
void sortarr(float arr[], int count)
{
    qsort(arr, count, sizeof(float), compare);
}
// Derive an integer-valued quantization multiplier from a sorted sample
// array: divide the global `scalefactor` by the value range after trimming
// `cut` entries from each end, then round down.
// Precondition: `arr` is sorted ascending (see sortarr).
float getmulti(float arr[], int cut, int count) {
    const float range = arr[count - cut - 1] - arr[cut];
    printf("minmax:%f \n", range);
    return floor(scalefactor / range);
}
// Quantized convolution forward pass: int8 inputs/weights, int16 accumulation
// (clipped per-element), integer bias add and leaky activation, then a float
// de-scale back into l.output.
// Fixes vs. original: the int8 input buffer was allocated with sizeof(int)
// (4x too large); the pre-clip value was stored in int16_t, which could wrap
// before clipping to the 7-bit bound; allocations are now checked.
void forward_convolutional_layer_q(layer l, network_state state)
{
    int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1; // output_height=input_height for stride=1 and pad=1
    int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1; // output_width=input_width for stride=1 and pad=1
    int i, j;
    int const out_size = out_h * out_w;
    typedef int16_t conv_t; // l.output
    conv_t* output_q = (conv_t*)calloc(l.outputs, sizeof(conv_t));
    state.input_int8 = (int8_t*)calloc(l.inputs, sizeof(int8_t));
    if (output_q == NULL || state.input_int8 == NULL) {
        free(output_q);
        free(state.input_int8);
        return;
    }
    // Quantize the float input; accumulate in a full int so the scaled value
    // cannot wrap before being clipped to +/-MAX_VAL_8.
    int z;
    for (z = 0; z < l.inputs; ++z) {
        int src = (int)(state.input[z] * l.input_quant_multiplier);
        state.input_int8[z] = max_abs(src, MAX_VAL_8);
    }
    // Convolution as GEMM: one row of the weight matrix per output filter.
    int m = l.n;
    int k = l.size * l.size * l.c;
    int n = out_h * out_w;
    int8_t* a = l.weights_int8;
    int8_t* b = (int8_t*)state.workspace;
    conv_t* c = output_q; // int16_t
    im2col_cpu_int8(state.input_int8, l.c, l.h, l.w, l.size, l.stride, l.pad, b);
    int t; // multi-thread gemm: one filter row per task
    #pragma omp parallel for
    for (t = 0; t < m; ++t) {
        gemm_nn_int8_int16(1, n, k, 1, a + t * k, k, b, n, c + t * n, n);
    }
    free(state.input_int8);
    // Bias addition (quantized biases; may still saturate the int16 output).
    int fil;
    for (fil = 0; fil < l.n; ++fil) {
        for (j = 0; j < out_size; ++j) {
            output_q[fil * out_size + j] = output_q[fil * out_size + j] + l.biases_quant[fil];
        }
    }
    // Leaky activation, integer approximation (x/10 instead of 0.1f*x).
    if (l.activation == LEAKY) {
        for (i = 0; i < l.n * out_size; ++i) {
            output_q[i] = (output_q[i] > 0) ? output_q[i] : output_q[i] / 10;
        }
    }
    // De-scaling back to float output.
    float ALPHA1 = 1 / (l.input_quant_multiplier * l.weights_quant_multiplier);
    for (i = 0; i < l.outputs; ++i) {
        l.output[i] = output_q[i] * ALPHA1;
    }
    free(output_q);
}
// Run one forward pass of the (partially) quantized network: convolutional
// layers use the int8 path, all other supported layer types fall back to the
// regular CPU implementations. Each layer's output feeds the next layer.
void yolov2_forward_network_q(network net, network_state state)
{
    state.workspace = net.workspace;
    for (int i = 0; i < net.n; ++i) {
        state.index = i;
        layer l = net.layers[i];
        switch (l.type) {
        case CONVOLUTIONAL:
            forward_convolutional_layer_q(l, state);
            break;
        case MAXPOOL:
            forward_maxpool_layer_cpu(l, state);
            break;
        case ROUTE:
            forward_route_layer_cpu(l, state);
            break;
        case REORG:
            forward_reorg_layer_cpu(l, state);
            break;
        case UPSAMPLE:
            forward_upsample_layer_cpu(l, state);
            break;
        case SHORTCUT:
            forward_shortcut_layer_cpu(l, state);
            break;
        case YOLO:
            forward_yolo_layer_cpu(l, state);
            break;
        case REGION:
            forward_region_layer_cpu(l, state);
            break;
        default:
            // Unsupported layer type: report and fall through unchanged.
            printf("\n layer: %d \n", l.type);
            break;
        }
        state.input = l.output;
    }
}
// detect on CPU
// Run quantized inference on `input` and return a pointer to the output of
// the last non-COST layer (the detection head).
float* network_predict_quantized(network net, float* input)
{
    network_state state;
    state.net = net;
    state.index = 0;
    state.input = input;
    state.truth = 0;
    state.train = 0;
    state.delta = 0;
    yolov2_forward_network_q(net, state); // network on CPU
    // Walk backwards past any trailing COST layers.
    int i = net.n - 1;
    while (i > 0 && net.layers[i].type == COST) --i;
    return net.layers[i].output;
}
/* Quantization-related */
// Compute per-layer quantization multipliers and fill the int8 weight and
// int16 bias buffers of every convolutional layer.
// Fixes vs. original: the sorted weight copy `arr` was malloc'd once per conv
// layer and never freed (leak); the allocation is now checked.
void do_quantization(network net) {
    int counter = 0;
    int j;
    for (j = 0; j < net.n; ++j) {
        layer* l = &net.layers[j];
        /*
        TODO: implement quantization
        The implementation given below is a naive version of per-network quantization; implement your own quantization that minimizes the mAP degradation
        */
        printf("\n");
        if (l->type == CONVOLUTIONAL) { // Quantize conv layer only
            size_t const weights_size = l->size * l->size * l->c * l->n;
            size_t const filter_size = l->size * l->size * l->c;
            int i, fil, count;
            // Input Scaling
            if (counter >= net.input_calibration_size) {
                printf(" Warning: CONV%d has no corresponding input_calibration parameter - default value 16 will be used;\n", j);
            }
            l->input_quant_multiplier = (counter < net.input_calibration_size) ? net.input_calibration[counter] : 16; // Using 16 as input_calibration as default value
            // l->input_quant_multiplier = floor(l->input_quant_multiplier*pow(2,12))/pow(2,12);
            ++counter;
            // Copy the weights so they can be sorted without disturbing the layer.
            float* arr = malloc(sizeof(float) * weights_size);
            if (arr == NULL) continue; // out of memory: skip this layer
            for (size_t ka = 0; ka < weights_size; ka++)
            {
                arr[ka] = l->weights[ka];
            }
            scalefactor = 91.3;
            count = weights_size;
            int cut = 0;
            sortarr(arr, count);
            l->weights_quant_multiplier = getmulti(arr, cut, count);
            free(arr); // BUG FIX: this buffer was previously leaked per layer
            if (counter == 1) {
                // First conv layer gets a special empirically-tuned scale.
                l->weights_quant_multiplier = l->weights_quant_multiplier * 256.032 / scalefactor;
            }
            // Weight Quantization
            for (fil = 0; fil < l->n; ++fil) {
                for (i = 0; i < filter_size; ++i) {
                    float w = l->weights[fil * filter_size + i] * l->weights_quant_multiplier; // Scale
                    l->weights_int8[fil * filter_size + i] = max_abs(w, MAX_VAL_8); // Clip
                }
            }
            // Bias Quantization: bias scale is the product of the two
            // multipliers so it matches the scale of the conv accumulator.
            float biases_multiplier = (l->weights_quant_multiplier * l->input_quant_multiplier);
            for (fil = 0; fil < l->n; ++fil) {
                float b = l->biases[fil] * biases_multiplier; // Scale
                l->biases_quant[fil] = max_abs(b, MAX_VAL_16); // Clip
            }
            printf(" CONV%d multipliers: input %g, weights %g, bias %g \n", j, l->input_quant_multiplier, l->weights_quant_multiplier, biases_multiplier);
        }
        else {
            printf(" No quantization for layer %d (layer type: %d) \n", j, l->type);
        }
    }
}
// Save quantized weights, bias, and scale
// Write each conv layer's int8 weights (4 bytes/line), int16 biases
// (2 values/line) and input scale to weights/CONV<j>_{W,B,S}.txt.
// Fixes vs. original: the bias loop stepped k by 4 while emitting only two
// values per line, silently dropping half the biases; fopen results are now
// checked and filenames built with snprintf into larger buffers.
void save_quantized_model(network net) {
    int j;
    for (j = 0; j < net.n; ++j) {
        layer* l = &net.layers[j];
        if (l->type != CONVOLUTIONAL) continue;
        size_t const weights_size = l->size * l->size * l->c * l->n;
        printf(" Saving quantized weights, bias, and scale for CONV%d \n", j);
        char weightfile[64];
        char biasfile[64];
        char scalefile[64];
        snprintf(weightfile, sizeof(weightfile), "weights/CONV%d_W.txt", j);
        snprintf(biasfile, sizeof(biasfile), "weights/CONV%d_B.txt", j);
        snprintf(scalefile, sizeof(scalefile), "weights/CONV%d_S.txt", j);
        size_t k;
        // Weights: four 8-bit values packed per 32-bit hex line.
        FILE* fp_w = fopen(weightfile, "w");
        if (fp_w != NULL) {
            for (k = 0; k < weights_size; k += 4) {
                uint8_t first = l->weights_int8[k];
                uint8_t second = (k + 1 < weights_size) ? l->weights_int8[k + 1] : 0;
                uint8_t third = (k + 2 < weights_size) ? l->weights_int8[k + 2] : 0;
                uint8_t fourth = (k + 3 < weights_size) ? l->weights_int8[k + 3] : 0;
                fprintf(fp_w, "%02x%02x%02x%02x\n", first, second, third, fourth);
            }
            fclose(fp_w);
        }
        // Biases: two 16-bit values packed per 32-bit hex line.
        // BUG FIX: step by 2 (values per line), not 4.
        FILE* fp_b = fopen(biasfile, "w");
        if (fp_b != NULL) {
            for (k = 0; k < (size_t)l->n; k += 2) {
                uint16_t first = l->biases_quant[k];
                uint16_t second = (k + 1 < (size_t)l->n) ? l->biases_quant[k + 1] : 0;
                fprintf(fp_b, "%04x%04x\n", first, second);
            }
            fclose(fp_b);
        }
        // Scale: the input quantization multiplier as plain text.
        FILE* fp_s = fopen(scalefile, "w");
        if (fp_s != NULL) {
            fprintf(fp_s, "%f\n", l->input_quant_multiplier);
            fclose(fp_s);
        }
    }
}
Example_atomic_restrict.2.c | /*
* @@name: atomic_restrict.2c
* @@type: C
* @@compilable: maybe
* @@linkable: no
* @@expect: failure
* @@version: omp_3.1
*/
/* Deliberately NON-CONFORMING OpenMP example (expected to fail): do not
 * "fix" it. It demonstrates that two atomic constructs must not reference
 * the same storage location through incompatible types (int vs float). */
void atomic_wrong2 ()
{
    int x;
    int *i;
    float *r;
    i = &x;
    r = (float *)&x;   /* r aliases the int x through an incompatible type */
    #pragma omp parallel
    {
        #pragma omp atomic update
        *i += 1;
        #pragma omp atomic update
        *r += 1.0;
        /* Incorrect because the atomic constructs reference the same location
           through incompatible types */
    }
}
|
base_mortar_criteria.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_BASE_MORTAR_CRITERIA_H)
#define KRATOS_BASE_MORTAR_CRITERIA_H
/* System includes */
/* External includes */
/* Project includes */
#include "contact_structural_mechanics_application_variables.h"
#include "custom_utilities/contact_utilities.h"
#include "utilities/mortar_utilities.h"
#include "utilities/variable_utils.h"
#include "custom_processes/aalm_adapt_penalty_value_process.h"
#include "custom_processes/compute_dynamic_factor_process.h"
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
// DEBUG
#include "includes/gid_io.h"
namespace Kratos
{
///@addtogroup ContactStructuralMechanicsApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class BaseMortarConvergenceCriteria
* @ingroup ContactStructuralMechanicsApplication
* @brief Custom convergence criteria for the mortar condition
* @author Vicente Mataix Ferrandiz
*/
template<class TSparseSpace, class TDenseSpace>
class BaseMortarConvergenceCriteria
: public ConvergenceCriteria< TSparseSpace, TDenseSpace >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of BaseMortarConvergenceCriteria
KRATOS_CLASS_POINTER_DEFINITION( BaseMortarConvergenceCriteria );
/// Local Flags
KRATOS_DEFINE_LOCAL_FLAG( COMPUTE_DYNAMIC_FACTOR );
KRATOS_DEFINE_LOCAL_FLAG( IO_DEBUG );
KRATOS_DEFINE_LOCAL_FLAG( PURE_SLIP );
/// The base class definition (and it subclasses)
typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
/// The sparse space used
typedef TSparseSpace SparseSpaceType;
/// The components containers
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef GidIO<> GidIOBaseType;
///@}
///@name Life Cycle
///@{
/// Default constructors
explicit BaseMortarConvergenceCriteria(
const bool ComputeDynamicFactor = false,
const bool IODebug = false,
const bool PureSlip = false
)
: ConvergenceCriteria< TSparseSpace, TDenseSpace >(),
mpIO(nullptr)
{
// Set local flags
mOptions.Set(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR, ComputeDynamicFactor);
mOptions.Set(BaseMortarConvergenceCriteria::IO_DEBUG, IODebug);
mOptions.Set(BaseMortarConvergenceCriteria::PURE_SLIP, PureSlip);
if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) {
mpIO = Kratos::make_shared<GidIOBaseType>("POST_LINEAR_ITER", GiD_PostBinary, SingleFile, WriteUndeformed, WriteElementsOnly);
}
}
///Copy constructor
BaseMortarConvergenceCriteria( BaseMortarConvergenceCriteria const& rOther )
:BaseType(rOther),
mOptions(rOther.mOptions),
mpIO(rOther.mpIO)
{
}
/// Destructor
~BaseMortarConvergenceCriteria() override = default;
///@}
///@name Operators
///@{
/**
* @brief Criterias that need to be called before getting the solution
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PreCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// The current process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
// The contact model part
ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact");
// We update the normals if necessary
const auto normal_variation = r_process_info.Has(CONSIDER_NORMAL_VARIATION) ? static_cast<NormalDerivativesComputation>(r_process_info.GetValue(CONSIDER_NORMAL_VARIATION)) : NO_DERIVATIVES_COMPUTATION;
if (normal_variation != NO_DERIVATIVES_COMPUTATION)
ComputeNodesMeanNormalModelPartWithPairedNormal(rModelPart); // Update normal of the conditions
const bool adapt_penalty = r_process_info.Has(ADAPT_PENALTY) ? r_process_info.GetValue(ADAPT_PENALTY) : false;
const bool dynamic_case = rModelPart.HasNodalSolutionStepVariable(VELOCITY);
/* Compute weighthed gap */
if (adapt_penalty || dynamic_case) {
// Set to zero the weighted gap
ResetWeightedGap(rModelPart);
// Compute the contribution
ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact"));
}
// In dynamic case
if ( dynamic_case && mOptions.Is(BaseMortarConvergenceCriteria::COMPUTE_DYNAMIC_FACTOR)) {
ComputeDynamicFactorProcess compute_dynamic_factor_process( r_contact_model_part );
compute_dynamic_factor_process.Execute();
}
// We recalculate the penalty parameter
if ( adapt_penalty ) {
AALMAdaptPenaltyValueProcess aalm_adaptation_of_penalty( r_contact_model_part );
aalm_adaptation_of_penalty.Execute();
}
return true;
}
/**
* @brief Compute relative and absolute error.
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
* @return true if convergence is achieved, false otherwise
*/
bool PostCriteria(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// We save the current WEIGHTED_GAP in the buffer
NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes();
const auto it_node_begin = r_nodes_array.begin();
#pragma omp parallel for
for(int i = 0; i < static_cast<int>(r_nodes_array.size()); ++i) {
auto it_node = it_node_begin + i;
it_node->FastGetSolutionStepValue(WEIGHTED_GAP, 1) = it_node->FastGetSolutionStepValue(WEIGHTED_GAP);
}
// Set to zero the weighted gap
ResetWeightedGap(rModelPart);
// Compute the contribution
ContactUtilities::ComputeExplicitContributionConditions(rModelPart.GetSubModelPart("ComputingContact"));
// GiD IO for debugging
if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) {
const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false;
const int nl_iter = rModelPart.GetProcessInfo()[NL_ITERATION_NUMBER];
const double label = static_cast<double>(nl_iter);
if (nl_iter == 1) {
mpIO->InitializeMesh(label);
mpIO->WriteMesh(rModelPart.GetMesh());
mpIO->FinalizeMesh();
mpIO->InitializeResults(label, rModelPart.GetMesh());
}
mpIO->WriteNodalFlags(INTERFACE, "INTERFACE", rModelPart.Nodes(), label);
mpIO->WriteNodalFlags(ACTIVE, "ACTIVE", rModelPart.Nodes(), label);
mpIO->WriteNodalFlags(SLAVE, "SLAVE", rModelPart.Nodes(), label);
mpIO->WriteNodalFlags(ISOLATED, "ISOLATED", rModelPart.Nodes(), label);
mpIO->WriteNodalResults(NORMAL, rModelPart.Nodes(), label, 0);
mpIO->WriteNodalResultsNonHistorical(DYNAMIC_FACTOR, rModelPart.Nodes(), label);
mpIO->WriteNodalResultsNonHistorical(AUGMENTED_NORMAL_CONTACT_PRESSURE, rModelPart.Nodes(), label);
mpIO->WriteNodalResults(DISPLACEMENT, rModelPart.Nodes(), label, 0);
if (rModelPart.Nodes().begin()->SolutionStepsDataHas(VELOCITY_X)) {
mpIO->WriteNodalResults(VELOCITY, rModelPart.Nodes(), label, 0);
mpIO->WriteNodalResults(ACCELERATION, rModelPart.Nodes(), label, 0);
}
if (r_nodes_array.begin()->SolutionStepsDataHas(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE))
mpIO->WriteNodalResults(LAGRANGE_MULTIPLIER_CONTACT_PRESSURE, rModelPart.Nodes(), label, 0);
else if (r_nodes_array.begin()->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER_X))
mpIO->WriteNodalResults(VECTOR_LAGRANGE_MULTIPLIER, rModelPart.Nodes(), label, 0);
mpIO->WriteNodalResults(WEIGHTED_GAP, rModelPart.Nodes(), label, 0);
if (frictional_problem) {
mpIO->WriteNodalFlags(SLIP, "SLIP", rModelPart.Nodes(), label);
mpIO->WriteNodalResults(WEIGHTED_SLIP, rModelPart.Nodes(), label, 0);
mpIO->WriteNodalResultsNonHistorical(AUGMENTED_TANGENT_CONTACT_PRESSURE, rModelPart.Nodes(), label);
}
}
return true;
}
/**
* @brief This function initialize the convergence criteria
* @param rModelPart The model part of interest
*/
void Initialize(ModelPart& rModelPart) override
{
// Calling base criteria
BaseType::Initialize(rModelPart);
// The current process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
r_process_info.SetValue(ACTIVE_SET_COMPUTED, false);
}
/**
* @brief This function initializes the solution step
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
*/
void InitializeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Update normal of the conditions
ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact");
MortarUtilities::ComputeNodesMeanNormalModelPart(r_contact_model_part);
const bool frictional_problem = rModelPart.IsDefined(SLIP) ? rModelPart.Is(SLIP) : false;
if (frictional_problem) {
const bool has_lm = rModelPart.HasNodalSolutionStepVariable(VECTOR_LAGRANGE_MULTIPLIER);
if (has_lm && mOptions.IsNot(BaseMortarConvergenceCriteria::PURE_SLIP)) {
MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part);
} else {
MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part, &WEIGHTED_SLIP, 1.0, true);
}
}
// IO for debugging
if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) {
mpIO->CloseResultFile();
std::ostringstream new_name ;
new_name << "POST_LINEAR_ITER_STEP=""POST_LINEAR_ITER_STEP=" << rModelPart.GetProcessInfo()[STEP];
mpIO->ChangeOutputName(new_name.str());
}
}
/**
* @brief This function finalizes the solution step
* @param rModelPart Reference to the ModelPart containing the contact problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual)
*/
void FinalizeSolutionStep(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// IO for debugging
if (mOptions.Is(BaseMortarConvergenceCriteria::IO_DEBUG)) {
mpIO->FinalizeResults();
}
}
/**
* @brief This function finalizes the non-linear iteration
* @param rModelPart Reference to the ModelPart containing the problem.
* @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
* @param rA System matrix (unused)
* @param rDx Vector of results (variations on nodal variables)
* @param rb RHS vector (residual + reactions)
*/
void FinalizeNonLinearIteration(
ModelPart& rModelPart,
DofsArrayType& rDofSet,
const TSystemMatrixType& rA,
const TSystemVectorType& rDx,
const TSystemVectorType& rb
) override
{
// Calling base criteria
BaseType::FinalizeNonLinearIteration(rModelPart, rDofSet, rA, rDx, rb);
// The current process info
ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
r_process_info.SetValue(ACTIVE_SET_COMPUTED, false);
}
///@}
///@name Operations
///@{
///@}
///@name Acces
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
Flags mOptions; /// Local flags
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief This method resets the weighted gap in the nodes of the problem
* @param rModelPart Reference to the ModelPart containing the contact problem.
*/
virtual void ResetWeightedGap(ModelPart& rModelPart)
{
NodesArrayType& r_nodes_array = rModelPart.GetSubModelPart("Contact").Nodes();
VariableUtils().SetScalarVar<Variable<double>>(WEIGHTED_GAP, 0.0, r_nodes_array);
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
GidIOBaseType::Pointer mpIO; /// The pointer to the debugging IO
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief It computes the mean of the normal in the condition in all the nodes
* @param rModelPart The model part to compute
*/
/**
 * @brief It computes the mean of the normal in the condition in all the nodes
 * @param rModelPart The model part to compute
 */
inline void ComputeNodesMeanNormalModelPartWithPairedNormal(ModelPart& rModelPart)
{
    // Nodal normals on the contact interface (tangents too for frictional problems)
    ModelPart& r_contact_model_part = rModelPart.GetSubModelPart("Contact");
    MortarUtilities::ComputeNodesMeanNormalModelPart(r_contact_model_part);
    const bool is_frictional = rModelPart.IsDefined(SLIP) && rModelPart.Is(SLIP);
    if (is_frictional) {
        const bool has_lm = rModelPart.HasNodalSolutionStepVariable(VECTOR_LAGRANGE_MULTIPLIER);
        if (has_lm && mOptions.IsNot(BaseMortarConvergenceCriteria::PURE_SLIP)) {
            MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part);
        } else {
            MortarUtilities::ComputeNodesTangentModelPart(r_contact_model_part, &WEIGHTED_SLIP, 1.0, true);
        }
    }
    // Refresh NORMAL and PAIRED_NORMAL on every computing contact condition
    ModelPart& r_computing_contact_model_part = rModelPart.GetSubModelPart("ComputingContact");
    ConditionsArrayType& r_conditions_array = r_computing_contact_model_part.Conditions();
    const auto it_begin = r_conditions_array.begin();
    #pragma omp parallel for
    for(int i = 0; i < static_cast<int>(r_conditions_array.size()); ++i) {
        auto it_cond = it_begin + i;
        Point::CoordinatesArrayType local_coords;
        // Normal of the condition itself, evaluated at its center
        GeometryType& r_geometry = it_cond->GetGeometry();
        local_coords = r_geometry.PointLocalCoordinates(local_coords, r_geometry.Center());
        it_cond->SetValue(NORMAL, r_geometry.UnitNormal(local_coords));
        // Normal of the paired geometry, evaluated at its own center
        GeometryType::Pointer p_paired_geometry = it_cond->GetValue(PAIRED_GEOMETRY);
        local_coords = p_paired_geometry->PointLocalCoordinates(local_coords, p_paired_geometry->Center());
        it_cond->SetValue(PAIRED_NORMAL, p_paired_geometry->UnitNormal(local_coords));
    }
}
///@}
///@name Private Access
///@{
///@}
///@name Serialization
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Unaccessible methods
///@{
///@}
}; // Class BaseMortarConvergenceCriteria
///@name Local flags creation
///@{
/// Local Flags
// Local flag bit 0: whether the dynamic factor must be computed (and its negation)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::COMPUTE_DYNAMIC_FACTOR(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::NOT_COMPUTE_DYNAMIC_FACTOR(Kratos::Flags::Create(0, false));
// Local flag bit 1: whether debugging IO output is written (and its negation)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::IO_DEBUG(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::NOT_IO_DEBUG(Kratos::Flags::Create(1, false));
// Local flag bit 2: pure-slip (frictional) formulation (and its negation)
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::PURE_SLIP(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags BaseMortarConvergenceCriteria<TSparseSpace, TDenseSpace>::NOT_PURE_SLIP(Kratos::Flags::Create(2, false));
} // namespace Kratos
#endif /* KRATOS_BASE_MORTAR_CRITERIA_H defined */
|
morn_wave_FFT.c | /*
Copyright (C) 2019-2020 JingWeiZhangHuai <jingweizhanghuai@163.com>
Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "morn_wave.h"
// Butterfly on two purely-real values: (re0, re1) <- (re0 + re1, re0 - re1).
#define FFTCACL0(re0,re1) {\
register float re_mul=re1;\
re1=re0-re_mul;\
re0=re0+re_mul;\
}
// Twiddled butterfly for the packed real-input FFT: multiplies (re1,im1) by the
// twiddle W[k], adds it into (re0,im0) and writes the conjugate-symmetric
// counterpart into (re2,im2). Requires Wre/Wim and loop index k in scope.
#define FFTCACL1(re0,im0,re1,im1,re2,im2) {\
register float re_mul=Wre[k]*re1-Wim[k]*im1;\
register float im_mul=Wim[k]*re1+Wre[k]*im1;\
re2=re0-re_mul;im2=-im0+im_mul;\
re0=re0+re_mul;im0= im0+im_mul;\
}
// Middle element of a merged block: twiddle is -i, so the imaginary part is
// the negated real input.
#define FFTCACL2(im0,re1) {\
im0=-re1;\
}
/* 8-point FFT of real input. Arguments arrive in bit-reversed order
 * (x0,x4,x2,x6,x1,x5,x3,x7); only bins 0..4 are written because the real
 * input makes the upper half of the spectrum the conjugate mirror image. */
void WaveFFT8(float *fft_re,float *fft_im,float d0,float d4,float d2,float d6,float d1,float d5,float d3,float d7)
{
    /* sums and differences of opposite samples */
    float t15 = d1-d5, t37 = d3-d7;
    float t04 = d0-d4, t26 = d2-d6;
    float s04 = d0+d4, s26 = d2+d6;
    float s15 = d1+d5, s37 = d3+d7;
    float even = s04+s26, odd = s15+s37;
    /* sqrt(2)/2 rotation for bins 1 and 3 */
    float a = 0.70710678118654752440084436210485*(t15-t37);
    float b = 0.70710678118654752440084436210485*(t15+t37);
    fft_re[0] = even+odd;  fft_im[0] = 0;
    fft_re[1] = t04+a;     fft_im[1] = -t26-b;
    fft_re[2] = s04-s26;   fft_im[2] = s37-s15;
    fft_re[3] = t04-a;     fft_im[3] = t26-b;
    fft_re[4] = even-odd;  fft_im[4] = 0;
}
// Per-object FFT cache attached through the MHandle mechanism: the padded
// transform size, the precomputed twiddle factors and the bit-reversal table.
// NOTE(review): the trailing identifier also declares a global instance named
// like the struct -- presumably required by the mHandle()/HASH_* machinery;
// confirm against the handle framework.
struct HandleWaveFFT {
int size;    // padded transform length (power of two, >= input size)
float *Wre;  // twiddle-factor real parts (size/2 entries)
float *Wim;  // twiddle-factor imaginary parts (size/2 entries)
int *order;  // bit-reversal input permutation (size entries)
}HandleWaveFFT;
/* Handle destructor: releases the cached twiddle tables and the bit-reversal
 * buffer. The handle struct itself is owned by the MHandle framework. */
void endWaveFFT(void *info)
{
    struct HandleWaveFFT *handle = (struct HandleWaveFFT *)info;
    if(handle->order != NULL) mFree(handle->order);
    if(handle->Wim   != NULL) mFree(handle->Wim);
    if(handle->Wre   != NULL) mFree(handle->Wre);
}
#define HASH_WaveFFT 0xf197b3ec
/**
 * @brief Forward radix-2 FFT of a real time-domain wave.
 *
 * The input is zero-padded to the next power of two N (N >= 8). For every
 * input channel the output carries two channels (real and imaginary parts);
 * the upper half of each spectrum is filled by conjugate symmetry. Twiddle
 * factors and the bit-reversal table are cached on `src` via MHandle and
 * rebuilt only when the padded size changes.
 *
 * @param src input wave; must have size > 4
 * @param fft output; if NULL or equal to src, the result replaces src in place
 */
void mWaveFFT(MWave *src,MWave *fft)
{
    int i,j,k,n;
    mException((INVALID_WAVE(src)),EXIT,"invalid input");
    // mException((mInfoGet(&(src->info),"wave_type") != MORN_WAVE_TD),EXIT,"invalid input");
    int N;
    MHandle *hdl=mHandle(src,WaveFFT);
    struct HandleWaveFFT *handle = (struct HandleWaveFFT *)(hdl->handle);
    if(hdl->valid == 0)
    {
        mException((src->size<=4),EXIT,"invalid input");
        // N: smallest power of two (>= 8) with src->size <= N
        k=1;while(src->size>(2<<k))k=k+1; N=(2<<k);
        if(handle->size != N)
        {
            handle->size = N;
            // bit-reversal permutation of the N input slots
            if(handle->order!=NULL) mFree(handle->order);handle->order=(int *)mMalloc(N*sizeof(int));
            N=N>>1;handle->order[0]=0;j=1;
            for(k=N;k>0;k=k>>1) {for(i=0;i<j;i++) handle->order[i+j]=handle->order[i]+k; j=j+j;}
            // forward twiddles W[k] = exp(-i*pi*k/N) for the half-size N
            if(handle->Wre!=NULL) mFree(handle->Wre);handle->Wre=(float *)mMalloc(N*sizeof(float));
            if(handle->Wim!=NULL) mFree(handle->Wim);handle->Wim=(float *)mMalloc(N*sizeof(float));
            double n_pi = MORN_PI/((double)N);double thta = n_pi;
            handle->Wre[0] = 1.0f; handle->Wim[0] = 0.0f;
            for(k=1;k<N;k++)
            {
                handle->Wre[k] = (float)cos(thta);
                handle->Wim[k] = 0.0f-(float)sin(thta);
                thta = thta + n_pi;
            }
        }
        hdl->valid = 1;
    }
    N = handle->size;
    float *Wre = handle->Wre; float *Wim = handle->Wim;
    MWave *p=fft;
    // in-place (or NULL) output: compute into a scratch wave, swap back at the end
    if((fft==NULL)||(fft == src)) fft = mWaveCreate(((src->channel)<<1),N,NULL);
    else mWaveRedefine(fft,((src->channel)<<1),N,fft->data);
    int wave_type=MORN_WAVE_FD; mPropertyWrite(fft,"wave_type",&wave_type,sizeof(int));
    // fix: normalize is an int, so write sizeof(int) bytes (was sizeof(float))
    int normalize=MORN_NOT_NORMALIZED;mPropertyWrite(fft,"normalize",&normalize,sizeof(int));
    N=(N>>1);
    for(int cn=0;cn<src->channel;cn++)
    {
        float *FFTDataRe = fft->data[(cn<<1)];
        float *FFTDataIm = fft->data[(cn<<1)+1];
        float *data = src->data[cn];
        // First pass: 8-point FFTs on bit-reversed input. Even order entries are
        // always < src->size; odd entries are >= N and may fall into the zero
        // padding. fix: guard with >= -- with '>' the element data[src->size]
        // was read one past the end of the channel buffer.
        for(i=0;i<N+N;i+=8)
        {
            int n0=handle->order[i ];int n1=handle->order[i+1];int n2=handle->order[i+2];int n3=handle->order[i+3];
            int n4=handle->order[i+4];int n5=handle->order[i+5];int n6=handle->order[i+6];int n7=handle->order[i+7];
            WaveFFT8(FFTDataRe+i,FFTDataIm+i,data[n0],(n1>=src->size)?0:data[n1],data[n2],(n3>=src->size)?0:data[n3],
                     data[n4],(n5>=src->size)?0:data[n5],data[n6],(n7>=src->size)?0:data[n7]);
        }
        // Remaining passes: merge blocks of n into blocks of 2n. Only the lower
        // half of each block is kept, exploiting real-input conjugate symmetry.
        for(n=8;n<=N;n=(n<<1))for(int j=0;j<N+N;j=j+n+n)
        {
            FFTCACL0(FFTDataRe[j],FFTDataRe[j+n]);
            for(i=1,k=N/n;i<(n>>1);i++,k=k+N/n)
            {
                FFTCACL1(FFTDataRe[j+i],FFTDataIm[j+i],FFTDataRe[j+n+i],FFTDataIm[j+n+i],FFTDataRe[j+n-i],FFTDataIm[j+n-i]);
            }
            FFTCACL2(FFTDataIm[j+i],FFTDataRe[j+n+i]);
        }
        // mirror the lower half-spectrum into the upper half (conjugate symmetry)
        for(int i=N+1;i<N+N;i++) {FFTDataRe[i]=FFTDataRe[N+N-i];FFTDataIm[i]=0-FFTDataIm[N+N-i];}
    }
    if(p!=fft) {mWaveExchange(src,fft);mWaveRelease(fft);}
}
// Standard complex radix-2 butterfly: multiplies (re1,im1) by the twiddle
// W[k] and forms the sum/difference with (re0,im0) in place.
// Requires Wre/Wim and loop index k in scope.
#define FFTCACL(re0,im0,re1,im1) {\
register float re_mul=Wre[k]*re1-Wim[k]*im1;\
register float im_mul=Wim[k]*re1+Wre[k]*im1;\
re1=re0-re_mul;im1=im0-im_mul;\
re0=re0+re_mul;im0=im0+im_mul;\
}
/*
void mWaveFFT(MWave *src,MWave *fft)
{
int i,j,k,n;
mException((INVALID_WAVE(src)),EXIT,"invalid input");
// mException((mInfoGet(&(src->info),"wave_type") != MORN_WAVE_TD),EXIT,"invalid input");
int N;
MHandle *hdl; ObjectHandle(src,WaveFFT,hdl);
struct HandleWaveFFT *handle = hdl->handle;
if(hdl->valid == 0)
{
mException((src->size<=4),EXIT,"invalid input");
k=1;while(src->size>(2<<k))k=k+1; N=(2<<k);
if(handle->size != N)
{
handle->size = N;
if(handle->order!=NULL) mFree(handle->order);handle->order=mMalloc(N*sizeof(int));
N=N>>1;handle->order[0]=0;j=1;
for(k=N;k>0;k=k>>1) {for(i=0;i<j;i++) handle->order[i+j]=handle->order[i]+k; j=j+j;}
if(handle->Wre!=NULL) mFree(handle->Wre);handle->Wre=mMalloc(N*sizeof(float));
if(handle->Wim!=NULL) mFree(handle->Wim);handle->Wim=mMalloc(N*sizeof(float));
double n_pi = MORN_PI/((double)N);double thta = n_pi;
handle->Wre[0] = 1.0f; handle->Wim[0] = 0.0f;
for(k=1;k<N;k++)
{
handle->Wre[k] = (float)cos(thta);
handle->Wim[k] = 0.0f-(float)sin(thta);
thta = thta + n_pi;
}
}
hdl->valid = 1;
}
N = handle->size;
float *Wre = handle->Wre; float *Wim = handle->Wim;
MWave *p=fft;
if((fft==NULL)||(fft == src)) fft = mWaveCreate(((src->channel)<<1),N,NULL);
else mWaveRedefine(fft,((src->channel)<<1),N,fft->data);
fft->info = src->info;
mInfoSet(&(fft->info),"wave_type",MORN_WAVE_FD);
mInfoSet(&(fft->info),"normalize",MORN_NOT_NORMALIZED);
N=(N>>1);
for(int cn=0;cn<src->channel;cn++)
{
float *FFTDataRe = fft->data[(cn<<1)];
float *FFTDataIm = fft->data[(cn<<1)+1];
float *data = src->data[cn];
for(i=0;i<N+N;i+=8)
{
int n0=handle->order[i ];int n1=handle->order[i+1];int n2=handle->order[i+2];int n3=handle->order[i+3];
int n4=handle->order[i+4];int n5=handle->order[i+5];int n6=handle->order[i+6];int n7=handle->order[i+7];
WaveFFT8(FFTDataRe+i,FFTDataIm+i,(n0>src->size)?0:data[n0],(n1>src->size)?0:data[n1],(n2>src->size)?0:data[n2],(n3>src->size)?0:data[n3],
(n4>src->size)?0:data[n4],(n5>src->size)?0:data[n5],(n6>src->size)?0:data[n6],(n7>src->size)?0:data[n7]);
}
for(n=8;n<=N;n=(n<<1))
{
for(j=0;j<(N<<1);j=j+(n<<1))
{
for(i=0,k=0;i<=(n>>1);i++,k=k+N/n)
FFTCACL(FFTDataRe[j+i],FFTDataIm[j+i],FFTDataRe[j+i+n],FFTDataIm[j+i+n]);
for(;i<n;i++)
{
FFTDataRe[j+i] = FFTDataRe[j+n+n-i];FFTDataIm[j+i] =-FFTDataIm[j+n+n-i];
FFTDataRe[j+i+n] = FFTDataRe[j+n-i];FFTDataIm[j+i+n] =-FFTDataIm[j+n-i];
}
}
}
}
if(p!=fft) {mWaveExchange(src,fft);mWaveRelease(fft);}
}
*/
#define HandleWaveIFFT HandleWaveFFT
#define endWaveIFFT endWaveFFT
#define HASH_WaveIFFT 0x81f00b75
/**
 * @brief Inverse FFT: rebuilds a time-domain wave from mWaveFFT output.
 *
 * Expects a frequency-domain wave (wave_type MORN_WAVE_FD) with re/im channel
 * pairs and a power-of-two size (>= 4). The result has half the channels; each
 * sample is scaled by 1/size. Twiddle factors (conjugates of the forward ones,
 * i.e. +sin) and the bit-reversal table are cached on `fft` via MHandle.
 *
 * @param fft input spectrum
 * @param dst output; if NULL or equal to fft, the result replaces fft in place
 */
void mWaveIFFT(MWave *fft,MWave *dst)
{
    int i,j,k,n;
    mException((INVALID_WAVE(fft)),EXIT,"invalid input");
    int wave_type=0;mPropertyRead(fft,"wave_type",&wave_type);
    mException((wave_type != MORN_WAVE_FD),EXIT,"invalid input");
    int N;
    MHandle *hdl=mHandle(fft,WaveIFFT);
    struct HandleWaveIFFT *handle = (struct HandleWaveIFFT *)(hdl->handle);
    if(hdl->valid == 0)
    {
        // size must be a power of two, at least 4
        N=fft->size;mException((N<4)||((N&(N-1))!=0),EXIT,"invalid input");
        if(handle->size != N)
        {
            handle->size = N;
            if(handle->order!=NULL) mFree(handle->order);handle->order=(int *)mMalloc(N*sizeof(int));
            N=N>>1;handle->order[0]=0;j=1;
            for(k=N;k>0;k=k>>1) {for(i=0;i<j;i++) handle->order[i+j]=handle->order[i]+k; j=j+j;}
            if(handle->Wre!=NULL) mFree(handle->Wre);handle->Wre=(float *)mMalloc(N*sizeof(float));
            if(handle->Wim!=NULL) mFree(handle->Wim);handle->Wim=(float *)mMalloc(N*sizeof(float));
            // inverse transform uses +sin (conjugate of the forward twiddles)
            double n_pi = MORN_PI/((double)N);double thta = n_pi;
            handle->Wre[0] = 1.0f; handle->Wim[0] = 0.0f;
            for(k=1;k<N;k++)
            {
                handle->Wre[k] = (float)cos(thta);
                handle->Wim[k] = (float)sin(thta);
                thta = thta + n_pi;
            }
        }
        hdl->valid = 1;
    }
    N = handle->size;
    float *Wre = handle->Wre; float *Wim = handle->Wim;
    MWave *p=dst;
    // dst is created with fft->channel channels so its buffers can serve as
    // re/im scratch space; the channel count is halved at the end
    if((dst==NULL)||(dst==fft)) dst = mWaveCreate(fft->channel,N,NULL);
    else mWaveRedefine(dst,fft->channel,N,dst->data);
    // dst->info = fft->info;
    wave_type=MORN_WAVE_TD; mPropertyWrite(dst,"wave_type",&wave_type,sizeof(int));
    // fix: normalize is an int, so write sizeof(int) bytes (was sizeof(float))
    int normalize=MORN_NOT_NORMALIZED;mPropertyWrite(dst,"normalize",&normalize,sizeof(int));
    N=(N>>1);
    for(int cn=0;cn<dst->channel;cn+=2)
    {
        float *FFTDataRe = dst->data[cn];
        float *FFTDataIm = dst->data[cn+1];
        float *fft_data_re=fft->data[cn];
        float *fft_data_im=fft->data[cn+1];
        // bit-reversed load of the complex spectrum
        for(i=0;i<N+N;i++)
        {
            int n=handle->order[i];
            FFTDataRe[i]=fft_data_re[n];FFTDataIm[i]=fft_data_im[n];
        }
        // full complex radix-2 butterfly passes
        for(n=1;n<=N;n=(n<<1))
        for(j=0;j<(N<<1);j=j+(n<<1))
        for(i=0,k=0;i<n;i++,k=k+N/n)
        FFTCACL(FFTDataRe[j+i],FFTDataIm[j+i],FFTDataRe[j+i+n],FFTDataIm[j+i+n]);
        // keep the real part, scaled by 1/size
        for(i=0;i<N+N;i++) dst->data[cn>>1][i] = FFTDataRe[i]/((float)(N+N));
    }
    dst->channel=dst->channel>>1;
    if(p!=dst) {mWaveExchange(fft,dst);mWaveRelease(dst);}
}
/*
void mWaveIFFT0(MWave *fft,MWave *dst)
{
int i,j,k,n;
int cn;
int wave_size;
double *DstDataRe;
double *DstDataIm;
int N;
double *Wre,*Wim;
int out_valid;
mException((INVALID_WAVE(fft)),EXIT,"invalid input");
mException((mInfoGet(&(fft->info),"wave_type") != MORN_WAVE_FD),EXIT,"invalid input");
wave_size = fft->size;
N = wave_size;
while((N&0x01)==0)
N = N>>1;
mException((N!=1),EXIT,"invalid input data");
N = wave_size;
if((INVALID_POINTER(dst))||(dst == fft))
{
out_valid = 0;
dst = mWaveCreate(((fft->channel)>>1),N,NULL);
}
else
{
out_valid = 1;
mWaveRedefine(dst,((fft->channel)>>1),N,dst->data);
}
dst->info = fft->info;
mInfoSet(&(dst->info),"wave_type",MORN_WAVE_TD);
DstDataRe = (double *)mMalloc(N*sizeof(double));
DstDataIm = (double *)mMalloc(N*sizeof(double));
N=(N>>1);
Wre = (double *)mMalloc(N*sizeof(double));
Wim = (double *)mMalloc(N*sizeof(double));
for(k=0;k<N;k++)
{
Wre[k] = cos((((double)(k))/((double)(N)))*MORN_PI);
Wim[k] = sin((((double)(k))/((double)(N)))*MORN_PI);
}
for(cn=0;cn<fft->channel;cn=cn+2)
{
DstDataRe[0] = fft->data[cn][0];
DstDataRe[N] = fft->data[cn][1];
DstDataIm[0] = fft->data[cn+1][0];
DstDataIm[N] = fft->data[cn+1][1];
for(i=1,j=N;i<N;i++)
{
printf("i is %d,j is %d\n",i,j);
printf("i is %d,j is %d\n",i+N,j+1);
DstDataRe[i] = fft->data[cn][j];
DstDataRe[i+N] = fft->data[cn][j+1];
DstDataIm[i] = fft->data[cn+1][j];
DstDataIm[i+N] = fft->data[cn+1][j+1];
k=N;
while(k<=j)
{
j=j-k;
k=k/2;
}
j=j+k;
}
for(i=0;i<N+N;i++)
printf("data is %f+%fi\n",DstDataRe[i],DstDataIm[i]);
for(n=1;n<=N;n=(n<<1))
{
//#pragma omp parallel for
for(j=0;j<(N<<1);j=j+(n<<1))
for(i=0,k=0;i<n;i++,k=k+N/n)
FFTCACL(DstDataRe[j+i],DstDataIm[j+i],DstDataRe[j+i+n],DstDataIm[j+i+n]);
}
for(i=0;i<wave_size;i++)
dst->data[cn>>1][i] = (float)(DstDataRe[i]/((double)wave_size));
}
mFree(Wre);
mFree(Wim);
mFree(DstDataRe);
mFree(DstDataIm);
if(!out_valid)
{
mWaveExchange(fft,dst);
mWaveRelease(dst);
}
}
*/
/**
 * @brief Converts an FFT result into a per-channel power spectrum.
 *
 * The input must be a frequency-domain wave as produced by mWaveFFT (re/im
 * channel pairs). The output has half the channels and half the samples:
 * the redundant mirrored half of each spectrum is dropped.
 *
 * @param fft  input spectrum (wave_type MORN_WAVE_FD)
 * @param ps   output; if invalid, the operation is done in place on fft
 * @param mode MORN_SQUAR_POWERS (re^2+im^2, the default), MORN_POWERS
 *             (magnitude) or MORN_LOG_POWERS (log10 of the magnitude)
 */
void mWavePowerSpectrum(MWave *fft,MWave *ps,int mode)
{
    int wav_size;
    float *re,*im;
    float *ps_data;
    int i,j;
    if(mode == MORN_DEFAULT)
        mode = MORN_SQUAR_POWERS;
    mException(((mode<1)||(mode>3)),EXIT,"invalid input");
    mException((INVALID_WAVE(fft)),EXIT,"invalid input");
    int wave_type=0;mPropertyRead(fft,"wave_type",&wave_type);
    mException((wave_type != MORN_WAVE_FD),EXIT,"invalid input");
    wav_size = (fft->size)>>1;
    // In place is safe: output channel j only reads input channels 2j and 2j+1,
    // and for j==0 every element is read before it is overwritten.
    if(INVALID_POINTER(ps))
        ps = fft;
    mWaveRedefine(ps,((fft->channel)>>1),wav_size,ps->data);
    // ps->info = fft->info;
    wave_type=MORN_WAVE_PS; mPropertyWrite(ps,"wave_type",&wave_type,sizeof(int));
    // fix: normalize is an int, so write sizeof(int) bytes (was sizeof(float))
    int normalize=MORN_NOT_NORMALIZED;mPropertyWrite(ps,"normalize",&normalize,sizeof(int));
    if(mode == MORN_SQUAR_POWERS)
    {
        for(j=0;j<ps->channel;j++)
        {
            re = fft->data[(j<<1)];
            im = fft->data[(j<<1)+1];
            ps_data = ps->data[j];
            for(i=0;i<wav_size;i++)
                ps_data[i] = re[i]*re[i] + im[i]*im[i];
        }
    }
    else if(mode == MORN_POWERS)
    {
        for(j=0;j<ps->channel;j++)
        {
            re = fft->data[(j<<1)];
            im = fft->data[(j<<1)+1];
            ps_data = ps->data[j];
            for(i=0;i<wav_size;i++)
                ps_data[i] = (float)sqrt((double)(re[i]*re[i] + im[i]*im[i]));
        }
    }
    else if(mode == MORN_LOG_POWERS)
    {
        // NOTE(review): a zero bin yields log10(0) = -inf here -- confirm callers
        // guarantee non-zero spectra or clamp before taking the logarithm.
        for(j=0;j<ps->channel;j++)
        {
            re = fft->data[(j<<1)];
            im = fft->data[(j<<1)+1];
            ps_data = ps->data[j];
            for(i=0;i<wav_size;i++)
                ps_data[i] = (log10((double)(re[i]*re[i] + im[i]*im[i])))/2.0;
        }
    }
}
/* Measures the energy of a single frequency in every channel of a wave by a
 * direct DFT projection. `src` must carry a "frequency" property (its sample
 * rate); component[cn] receives the squared magnitude for channel cn. */
void mWaveFrequencyComponent(MWave *src,float frequency,float *component)
{
    float src_frequency;mException(mPropertyRead(src,"frequency",&src_frequency)==NULL,EXIT,"invalid input");
    /* phase advance per sample, in radians */
    float phase_step = (MORN_PI+MORN_PI)*frequency/src_frequency;
    for(int cn=0;cn<src->channel;cn++)
    {
        float *samples = src->data[cn];
        float phase  = 0;
        float acc_re = samples[0];  /* cos(0) == 1 */
        float acc_im = 0.0f;        /* sin(0) == 0 */
        for(int idx=1;idx<src->size;idx++)
        {
            phase = phase + phase_step;
            acc_re = acc_re + samples[idx]*cos(phase);
            acc_im = acc_im - samples[idx]*sin(phase);
        }
        component[cn] = acc_re*acc_re + acc_im*acc_im;
    }
}
// Cache for mWaveFrequencyAnalyse: the analysed frequency list plus the
// precomputed cos / -sin basis vectors (one matrix row per frequency).
struct HandleWaveFrequencyAnalyse
{
int src_frequency;  // sample rate of the source wave
                    // NOTE(review): declared int, but the "frequency" property
                    // is read as float elsewhere (mWaveFrequencyComponent) --
                    // confirm mPropertyRead converts rather than copying raw
                    // float bytes into this int.
int num;            // number of analysed frequencies
float *frequency;   // the analysed frequencies (num entries)
MMatrix *re_mat;    // cos basis, num x src->size
MMatrix *im_mat;    // -sin basis, num x src->size
}HandleWaveFrequencyAnalyse;
#define HASH_WaveFrequencyAnalyse 0x77f2456d
/* Handle destructor: frees the cached frequency list and the cos/sin basis
 * matrices built by mWaveFrequencyAnalyse. */
void endWaveFrequencyAnalyse(void *info)
{
    struct HandleWaveFrequencyAnalyse *handle = (struct HandleWaveFrequencyAnalyse *)info;
    if(handle->re_mat != NULL) mMatrixRelease(handle->re_mat);
    if(handle->im_mat != NULL) mMatrixRelease(handle->im_mat);
    if(handle->frequency != NULL) mFree(handle->frequency);
}
/**
 * @brief Measures the energy of several frequencies in every channel of a wave.
 *
 * Projects each channel onto a cached cos / -sin basis (one row per frequency);
 * component[cn][j] receives the squared magnitude of frequency[j] in channel cn.
 * The basis is cached through MHandle and rebuilt only when the frequency list
 * or the wave's sample rate changes. When the cache is valid, num<=0 and/or a
 * NULL frequency pointer mean "reuse the cached values".
 */
void mWaveFrequencyAnalyse(MWave *src,float *frequency,int num,float **component)
{
int cn,i,j;
MHandle *hdl=mHandle(src,WaveFrequencyAnalyse);
struct HandleWaveFrequencyAnalyse *handle = (struct HandleWaveFrequencyAnalyse *)(hdl->handle);
if(hdl->valid ==1)
{
// decide whether the cached basis can be reused
if(num <=0) num = handle->num;
if(INVALID_POINTER(frequency)) frequency = handle->frequency;
float src_frequency=-1;mException(mPropertyRead(src,"frequency",&src_frequency)==NULL,EXIT,"invalid input");
// NOTE(review): int-vs-float comparison -- handle->src_frequency is an int;
// confirm the stored value is comparable with the float property.
if((handle->src_frequency != src_frequency)&&(src_frequency >0))
hdl->valid = 0;
else if(frequency != handle->frequency)
if(memcmp(handle->frequency,frequency,num*sizeof(float))!=0)
hdl->valid = 0;
}
if(hdl->valid == 0)
{
mException((num<=0)||(INVALID_POINTER(frequency)),EXIT,"invalid input");
// NOTE(review): "frequency" is read into an int field here but into a float
// elsewhere (mWaveFrequencyComponent) -- if mPropertyRead copies raw bytes
// this stores a garbage value; confirm its conversion behavior.
handle->src_frequency=-1;mPropertyRead(src,"frequency",&(handle->src_frequency));
mException((handle->src_frequency<=0),EXIT,"invalid input");
// NOTE(review): on first use handle->frequency is NULL while num>handle->num,
// so mFree(NULL) is called -- confirm mFree tolerates NULL.
if(num>handle->num) {mFree(handle->frequency);handle->frequency=NULL;}
if(handle->frequency==NULL) handle->frequency=(float *)mMalloc(num*sizeof(float));
handle->num = num;
if(handle->re_mat == NULL) handle->re_mat = mMatrixCreate(num,src->size,NULL);
else mMatrixRedefine(handle->re_mat,num,src->size,NULL);
if(handle->im_mat == NULL) handle->im_mat = mMatrixCreate(num,src->size,NULL);
else mMatrixRedefine(handle->im_mat,num,src->size,NULL);
memcpy(handle->frequency,frequency,num*sizeof(float));
// precompute one cos / -sin basis row per frequency (phase accumulated in double)
for(j=0;j<num;j++)
{
double c = ((double)(MORN_PI+MORN_PI)*(handle->frequency[j]))/((double)(handle->src_frequency));
double e = 0.0;
handle->re_mat->data[j][0] = 1.0f;
handle->im_mat->data[j][0] = 0.0f;
for(i=1;i<src->size;i++)
{
e = e+c;
handle->re_mat->data[j][i] = cos(e);
handle->im_mat->data[j][i] = 0.0f - sin(e);
}
}
hdl->valid = 1;
}
MMatrix *re_mat = handle->re_mat;
MMatrix *im_mat = handle->im_mat;
// project every channel onto every basis row
for(cn=0;cn<src->channel;cn++)
{
for(j=0;j<num;j++)
{
float re = 0.0f;
float im = 0.0f;
for(i=0;i<src->size;i++)
{
re = re + src->data[cn][i]*re_mat->data[j][i];
im = im + src->data[cn][i]*im_mat->data[j][i];
}
component[cn][j] =re*re+im*im;
}
}
}
/*
void mWaveFrequencyAnalyse2(MWave *src,float *frequency,int num,float **component)
{
float *e,*c;
int i,j,cn;
float *re,*im;
float *data;
e = (float *)mMalloc(num*sizeof(float));
c = (float *)mMalloc(num*sizeof(float));
re = (float *)mMalloc(num*sizeof(float));
im = (float *)mMalloc(num*sizeof(float));
for(cn=0;cn<src->channel;cn++)
{
data = src->data[cn];
for(j=0;j<num;j++)
{
c[j] = (MORN_PI+MORN_PI)*frequency[j]/((float)(src->info.frequency));
e[j] = 0;
re[j] = data[0];
im[j] = 0.0;
}
for(i=1;i<src->size;i++)
for(j=0;j<num;j++)
{
e[j] = e[j]+c[j];
re[j] = re[j] + data[i]*cos(e[j]);
im[j] = im[j] - data[i]*sin(e[j]);
}
for(j=0;j<num;j++)
{
// printf("re[j] is %f,im[j] is %f\n",re[j],im[j]);
component[cn][j] =re[j]*re[j]+im[j]*im[j];
}
}
mFree(e);
mFree(c);
mFree(re);
mFree(im);
}
*/
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;       // the parser's depth counter being managed
unsigned AddedLevels;  // how many levels this RAII object has added so far
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
// Undo every level added through this object.
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
// Enter one more template-parameter level.
void operator++() {
++Depth;
++AddedLevels;
}
// Enter D more levels at once.
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
// Replace the levels added so far by exactly D levels.
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
// Depth as it was before this RAII object added anything.
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space before the '<' token
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
/// One suspicious '<': the candidate template-name expression on its left,
/// the location of the '<', its ranking, and the bracket nesting counts at
/// the time it was seen.
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
/// True if the parser is back at exactly the nesting depth at which this
/// '<' was recorded.
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
/// True if the parser is at the recorded depth or nested more deeply.
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
/// Construct a parser driven by preprocessor \p PP that performs semantic
/// analysis through \p Actions.
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;

const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }

/// The current lookahead ('peek') token.
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
  return Actions.incrementMSManglingNumber();
}

Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }

// Type forwarding.  All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;

typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;

typedef Sema::FullExprArg FullExprArg;

// Parsing methods.

/// Initialize - Warm up the parser.
///
void Initialize();

/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);

/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
/// Convenience overload that discards the parsed declaration group.
bool ParseTopLevelDecl() {
  DeclGroupPtrTy Result;
  return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
  assert(!isTokenSpecial() &&
         "Should consume special tokens with Consume*Token");
  // Record where the consumed token started so it stays reachable via
  // PrevTokLocation / getEndOfPreviousToken().
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// If the current token is \p Expected, consume it (updating
/// PrevTokLocation) and return true; otherwise leave the token stream
/// untouched and return false.
bool TryConsumeToken(tok::TokenKind Expected) {
  if (!Tok.is(Expected))
    return false;
  // ConsumeToken() performs the special-token assertion and the
  // PrevTokLocation bookkeeping for us.
  ConsumeToken();
  return true;
}
/// As TryConsumeToken(Expected), but additionally stores the location of
/// the consumed token in \p Loc on success.
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
  if (Tok.isNot(Expected))
    return false;
  // ConsumeToken() returns the location of the token it just consumed.
  Loc = ConsumeToken();
  return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
  // Route to the consume method that keeps the corresponding bookkeeping
  // (paren/bracket/brace counts, annotation end locations) accurate.
  if (isTokenParen())
    return ConsumeParen();
  if (isTokenBracket())
    return ConsumeBracket();
  if (isTokenBrace())
    return ConsumeBrace();
  if (isTokenStringLiteral())
    return ConsumeStringToken();
  if (Tok.is(tok::code_completion))
    return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
                                    : handleUnexpectedCodeCompletionToken();
  if (Tok.isAnnotation())
    return ConsumeAnnotationToken();
  return ConsumeToken();
}

/// Return the location just past the end of the most recently consumed
/// token.
SourceLocation getEndOfPreviousToken() {
  return PP.getLocForEndOfToken(PrevTokLocation);
}

/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
  return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//

/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
  return Tok.is(tok::l_paren) || Tok.is(tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
  return Tok.is(tok::l_square) || Tok.is(tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
  return Tok.is(tok::l_brace) || Tok.is(tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
  return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods
/// (one of the Consume*Token variants rather than plain ConsumeToken).
bool isTokenSpecial() const {
  if (Tok.isAnnotation() || Tok.is(tok::code_completion))
    return true;
  return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
         isTokenBrace();
}

/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
  Token Next = Tok;
  // Re-inject \p Consumed, immediately lex it back in as the current token,
  // then re-inject the previous current token so it is returned next.
  PP.EnterToken(Consumed, /*IsReinject*/true);
  PP.Lex(Tok);
  PP.EnterToken(Next, /*IsReinject*/true);
}

/// Consume an annotation token. Returns the token's *start* location while
/// PrevTokLocation is set to the annotation's *end* location, since an
/// annotation token covers a whole source range.
SourceLocation ConsumeAnnotationToken() {
  assert(Tok.isAnnotation() && "wrong consume method");
  SourceLocation Loc = Tok.getLocation();
  PrevTokLocation = Tok.getAnnotationEndLoc();
  PP.Lex(Tok);
  return Loc;
}

/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
  assert(isTokenParen() && "wrong consume method");
  if (Tok.getKind() == tok::l_paren)
    ++ParenCount;
  else if (ParenCount) {
    // Discard any tracked potential template angle brackets.
    AngleBrackets.clear(*this);
    --ParenCount;       // Don't let unbalanced )'s drive the count negative.
  }
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
  assert(isTokenBracket() && "wrong consume method");
  if (Tok.getKind() == tok::l_square)
    ++BracketCount;
  else if (BracketCount) {
    // Discard any tracked potential template angle brackets.
    AngleBrackets.clear(*this);
    --BracketCount;     // Don't let unbalanced ]'s drive the count negative.
  }

  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
  assert(isTokenBrace() && "wrong consume method");
  if (Tok.getKind() == tok::l_brace)
    ++BraceCount;
  else if (BraceCount) {
    // Discard any tracked potential template angle brackets.
    AngleBrackets.clear(*this);
    --BraceCount;     // Don't let unbalanced }'s drive the count negative.
  }

  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
  assert(isTokenStringLiteral() &&
         "Should only consume string literals with this method");
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}

/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
  assert(Tok.is(tok::code_completion));
  PrevTokLocation = Tok.getLocation();
  PP.Lex(Tok);
  return PrevTokLocation;
}
/// When we are consuming a code-completion token without having matched
/// specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();

/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
  if (PP.isCodeCompletionEnabled())
    PP.setCodeCompletionReached();
  // Cut off parsing by acting as if we reached the end-of-file.
  Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;

/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;

/// Initialize all pragma handlers.
void initializePragmaHandlers();

/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();

/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();

/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();

/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();

/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();

/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();

/// Handle the annotation token produced for
/// #pragma pointers_to_members...
void HandlePragmaMSPointersToMembers();

/// Handle the annotation token produced for
/// #pragma vtordisp...
void HandlePragmaMSVtorDisp();

/// Handle the annotation token produced for the MS-specific pragmas
/// dispatched to the HandlePragmaMS* helpers below.
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
                           SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
                           SourceLocation PragmaLocation);

/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();

/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();

/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();

/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();

/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();

/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();

/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();

/// Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();

/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();

/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();

/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);

bool ParsePragmaAttributeSubjectMatchRuleSet(
    attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
    SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);

/// Handle the annotation token produced for #pragma clang attribute.
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens.  LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
  // Never look past an end-of-file token.
  if (N == 0 || Tok.is(tok::eof)) return Tok;
  return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
  return PP.LookAhead(0);
}

/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
  return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}

private:
/// Store a parsed type into an annotation token.
static void setTypeAnnotation(Token &Tok, ParsedType T) {
  Tok.setAnnotationValue(T.getAsOpaquePtr());
}

/// Read a named declaration out of an annotation token.
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
  return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}

/// Store a named declaration into an annotation token.
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
  Tok.setAnnotationValue(ND);
}

/// Read an identifier out of an annotation token.
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
  return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}

/// Store an identifier into an annotation token.
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
  Tok.setAnnotationValue(ND);
}

/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
  return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}

/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
  Tok.setAnnotationValue(ER.getAsOpaquePointer());
}

public:
// If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
// find a type name by attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
                                               bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
/// Quick syntactic check for whether the current token could begin a
/// C++ nested-name-specifier.
bool MightBeCXXScopeToken() {
  if (Tok.isOneOf(tok::identifier, tok::coloncolon, tok::kw_decltype,
                  tok::kw___super))
    return true;
  // A template-id annotation can also start a scope when followed by '::'.
  return Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon);
}
/// Annotate a C++ scope token if one might be present; returns false when
/// the current token clearly cannot start a scope specifier.
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
  if (!MightBeCXXScopeToken())
    return false;
  return TryAnnotateCXXScopeToken(EnteringContext);
}
private:
/// Outcome of an attempt to annotate the name at the current position.
enum AnnotatedNameKind {
  /// Annotation has failed and emitted an error.
  ANK_Error,
  /// The identifier is a tentatively-declared name.
  ANK_TentativeDecl,
  /// The identifier is a template name. FIXME: Add an annotation for that.
  ANK_TemplateName,
  /// The identifier can't be resolved.
  ANK_Unresolved,
  /// Annotation was successful.
  ANK_Success
};

/// Attempt to classify and annotate the name at the current token
/// position, optionally using \p CCC for typo correction.
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);

/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords.  This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
                     const char *&PrevSpec, unsigned &DiagID,
                     bool &isInvalid) {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;

  IdentifierInfo *II = Tok.getIdentifierInfo();
  // 'pixel' is only context-sensitive in AltiVec mode, not ZVector.
  if (II != Ident_vector && II != Ident_bool &&
      (!getLangOpts().AltiVec || II != Ident_pixel))
    return false;

  return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
  if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
    return false;
  if (Tok.getIdentifierInfo() != Ident_vector)
    return false;
  return TryAltiVecVectorTokenOutOfLine();
}

bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
                              const char *&PrevSpec, unsigned &DiagID,
                              bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
  assert(getLangOpts().ObjC);
  if (Tok.isAnnotation())
    return false;
  // Lazily cache the IdentifierInfo for 'instancetype' on first use.
  if (!Ident_instancetype)
    Ident_instancetype = PP.getIdentifierInfo("instancetype");
  return Tok.getIdentifierInfo() == Ident_instancetype;
}

/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);

/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
///   TentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....
///   TPA.Revert();
///
class TentativeParsingAction {
  Parser &P;

  // Parser state captured at construction so Revert() can restore it.
  PreferredTypeBuilder PrevPreferredType;
  Token PrevTok;
  size_t PrevTentativelyDeclaredIdentifierCount;
  unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
  bool isActive;

public:
  explicit TentativeParsingAction(Parser& p) : P(p) {
    PrevPreferredType = P.PreferredType;
    PrevTok = P.Tok;
    PrevTentativelyDeclaredIdentifierCount =
        P.TentativelyDeclaredIdentifiers.size();
    PrevParenCount = P.ParenCount;
    PrevBracketCount = P.BracketCount;
    PrevBraceCount = P.BraceCount;
    // Ask the preprocessor to start caching lexed tokens so they can be
    // replayed if this action is reverted.
    P.PP.EnableBacktrackAtThisPos();
    isActive = true;
  }
  void Commit() {
    assert(isActive && "Parsing action was finished!");
    // Drop identifiers tentatively declared during this action and keep
    // the consumed tokens.
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.PP.CommitBacktrackedTokens();
    isActive = false;
  }
  void Revert() {
    assert(isActive && "Parsing action was finished!");
    // Rewind the preprocessor's token stream, then restore the parser
    // state saved at construction.
    P.PP.Backtrack();
    P.PreferredType = PrevPreferredType;
    P.Tok = PrevTok;
    P.TentativelyDeclaredIdentifiers.resize(
        PrevTentativelyDeclaredIdentifierCount);
    P.ParenCount = PrevParenCount;
    P.BracketCount = PrevBracketCount;
    P.BraceCount = PrevBraceCount;
    isActive = false;
  }
  ~TentativeParsingAction() {
    assert(!isActive && "Forgot to call Commit or Revert!");
  }
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
class RevertingTentativeParsingAction
    : private Parser::TentativeParsingAction {
public:
  RevertingTentativeParsingAction(Parser &P)
      : Parser::TentativeParsingAction(P) {}
  ~RevertingTentativeParsingAction() { Revert(); }
};

class UnannotatedTentativeParsingAction;

/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
  Parser &P;
  Decl *DC;
  // Tracks whether the parser is inside an ObjC container while switched.
  SaveAndRestore<bool> WithinObjCContainer;
public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    // Only switch when we are actually inside an ObjC declaration context.
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input.  If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");

/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);

/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};

/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);

/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation

/// ParseScope - RAII object that introduces a new scope for parsing. The
/// kind of scope is determined by ScopeFlags. Create an instance on the
/// stack where the parser enters the new scope; the constructor enters the
/// scope and the destructor (or an explicit Exit()) leaves it.
class ParseScope {
  Parser *Self;
  ParseScope(const ParseScope &) = delete;
  void operator=(const ParseScope &) = delete;

public:
  // Enter a new scope with flags ScopeFlags, unless EnteredScope is false
  // or we are about to enter a compound statement (in which case only the
  // MS mangling number is bumped and no scope is entered).
  ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
             bool BeforeCompoundStmt = false)
      : Self(Self) {
    if (EnteredScope && !BeforeCompoundStmt) {
      Self->EnterScope(ScopeFlags);
      return;
    }
    if (BeforeCompoundStmt)
      Self->incrementMSManglingNumber();
    // Mark this object as not owning a scope.
    this->Self = nullptr;
  }

  // Exit - Exit the scope associated with this object now, rather
  // than waiting until the object is destroyed.
  void Exit() {
    if (!Self)
      return;
    Self->ExitScope();
    Self = nullptr;
  }

  ~ParseScope() { Exit(); }
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);

/// ExitScope - Pop a scope off the scope stack.
void ExitScope();

private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
  Scope *CurScope;
  unsigned OldFlags;
  ParseScopeFlags(const ParseScopeFlags &) = delete;
  void operator=(const ParseScopeFlags &) = delete;

public:
  // NOTE(review): defined out of line; ManageFlags appears to gate whether
  // the flags are actually modified -- confirm against the definition.
  ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
  ~ParseScopeFlags();
};

//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.

public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
/// Emit \p DiagID at the location of the current token.
DiagnosticBuilder Diag(unsigned DiagID) {
  return Diag(Tok, DiagID);
}

private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
                        SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
  StopAtSemi = 1 << 0,  ///< Stop skipping at semicolon
  /// Stop skipping at specified token, but don't skip the token itself
  StopBeforeMatch = 1 << 1,
  StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};

/// Allow SkipUntilFlags values to be combined with '|'.
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                          SkipUntilFlags R) {
  return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                     static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
/// Skip until either \p T1 or \p T2 is found.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
/// Skip until any of \p T1, \p T2 or \p T3 is found.
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();

/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.

struct ParsingClass;

/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
  virtual ~LateParsedDeclaration();

  // Subclasses override the hook(s) corresponding to the kind of tokens
  // they cached.
  virtual void ParseLexedMethodDeclarations();
  virtual void ParseLexedMemberInitializers();
  virtual void ParseLexedMethodDefs();
  virtual void ParseLexedAttributes();
  virtual void ParseLexedPragmas();
};

/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
  LateParsedClass(Parser *P, ParsingClass *C);
  ~LateParsedClass() override;

  void ParseLexedMethodDeclarations() override;
  void ParseLexedMemberInitializers() override;
  void ParseLexedMethodDefs() override;
  void ParseLexedAttributes() override;
  void ParseLexedPragmas() override;

private:
  Parser *Self;
  ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
  Parser *Self;
  CachedTokens Toks;
  IdentifierInfo &AttrName;
  // NOTE(review): presumably the macro through which the attribute was
  // spelled, when it came from a macro expansion -- confirm at use site.
  IdentifierInfo *MacroII = nullptr;
  SourceLocation AttrNameLoc;
  /// The declarations this attribute will be attached to once parsed.
  SmallVector<Decl*, 2> Decls;

  explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
                               SourceLocation Loc)
      : Self(P), AttrName(Name), AttrNameLoc(Loc) {}

  void ParseLexedAttributes() override;

  void addDecl(Decl *D) { Decls.push_back(D); }
};

/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
  Parser *Self = nullptr;
  AccessSpecifier AS = AS_none;
  CachedTokens Toks;

public:
  explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
      : Self(P), AS(AS) {}

  void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
  const CachedTokens &toks() const { return Toks; }
  AccessSpecifier getAccessSpecifier() const { return AS; }

  void ParseLexedPragmas() override;
};
// A list of late-parsed attributes.  Used by ParseGNUAttributes.
class LateParsedAttrList : public SmallVector<LateParsedAttribute *, 2> {
public:
  /// \p PSoon indicates whether the attributes are intended to be parsed
  /// shortly after creation rather than at the end of the class.
  /// 'explicit' prevents an accidental implicit bool -> list conversion.
  explicit LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) {}

  /// Whether we plan to parse these attributes shortly after creation.
  /// (const: pure accessor, callable on const lists.)
  bool parseSoon() const { return ParseSoon; }

private:
  bool ParseSoon;  // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
  Parser *Self;
  Decl *D;
  CachedTokens Toks;

  /// Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  explicit LexedMethod(Parser* P, Decl *MD)
      : Self(P), D(MD), TemplateScope(false) {}

  void ParseLexedMethodDefs() override;
};

/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(Decl *P,
                                     std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) { }

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser* Self;

  /// Method - The method declaration.
  Decl *Method;

  /// Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
  SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;

  /// The set of tokens that make up an exception-specification that
  /// has not yet been parsed.
  CachedTokens *ExceptionSpecTokens;
};

/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
  LateParsedMemberInitializer(Parser *P, Decl *FD)
      : Self(P), Field(FD) { }

  void ParseLexedMemberInitializers() override;

  Parser *Self;

  /// Field - The field declaration.
  Decl *Field;

  /// CachedTokens - The sequence of tokens that comprises the initializer,
  /// including any leading '='.
  CachedTokens Toks;
};

/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
  ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
      : TopLevelClass(TopLevelClass), TemplateScope(false),
        IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }

  /// Whether this is a "top-level" class, meaning that it is
  /// not nested within another class.
  bool TopLevelClass : 1;

  /// Whether this class had an associated template
  /// scope. When true, TagOrTemplate is a template declaration;
  /// otherwise, it is a tag declaration.
  bool TemplateScope : 1;

  /// Whether this class is an __interface.
  bool IsInterface : 1;

  /// The class or class template whose definition we are parsing.
  Decl *TagOrTemplate;

  /// LateParsedDeclarations - Method declarations, inline definitions and
  /// nested classes that contain pieces whose parsing will be delayed until
  /// the top-level class is fully defined.
  LateParsedDeclarationsContainer LateParsedDeclarations;
};

/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;

/// Return the innermost class currently being parsed.
ParsingClass &getCurrentClass() {
  assert(!ClassStack.empty() && "No lexed method stacks!");
  return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
/// Pushes a parsing-class record in the constructor and pops it either
/// explicitly via Pop() or implicitly on destruction.
class ParsingClassDefinition {
  Parser &P;
  bool Popped;
  Sema::ParsingClassState State;

public:
  ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
                         bool IsInterface)
      : P(P), Popped(false),
        State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
  }

  /// Pop this class of the stack.
  void Pop() {
    assert(!Popped && "Nested class has already been popped");
    Popped = true;
    P.PopParsingClass(State);
  }

  ~ParsingClassDefinition() {
    // Pop automatically unless the caller already did so explicitly.
    if (!Popped)
      Pop();
  }
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
  // Fix: LastParameterListWasEmpty was previously left uninitialized by
  // this constructor (the other two constructors initialize it), leaving
  // an indeterminate bool that could be read later.
  ParsedTemplateInfo()
      : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc(),
        LastParameterListWasEmpty(false) { }

  ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
                     bool isSpecialization,
                     bool lastParameterListWasEmpty = false)
      : Kind(isSpecialization? ExplicitSpecialization : Template),
        TemplateParams(TemplateParams),
        LastParameterListWasEmpty(lastParameterListWasEmpty) { }

  explicit ParsedTemplateInfo(SourceLocation ExternLoc,
                              SourceLocation TemplateLoc)
      : Kind(ExplicitInstantiation), TemplateParams(nullptr),
        ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
        LastParameterListWasEmpty(false){ }

  /// The kind of template we are parsing.
  enum {
    /// We are not parsing a template at all.
    NonTemplate = 0,
    /// We are parsing a template declaration.
    Template,
    /// We are parsing an explicit specialization.
    ExplicitSpecialization,
    /// We are parsing an explicit instantiation.
    ExplicitInstantiation
  } Kind;

  /// The template parameter lists, for template declarations
  /// and explicit specializations.
  TemplateParameterLists *TemplateParams;

  /// The location of the 'extern' keyword, if any, for an explicit
  /// instantiation
  SourceLocation ExternLoc;

  /// The location of the 'template' keyword, for an explicit
  /// instantiation.
  SourceLocation TemplateLoc;

  /// Whether the last template parameter list was empty.
  bool LastParameterListWasEmpty;

  SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
/// Kinds of initializers whose tokens are cached for late parsing
/// (see ConsumeAndStoreInitializer below).
enum CachedInitKind {
  CIK_DefaultArgument,   // default argument of a function parameter
  CIK_DefaultInitializer // in-class default member initializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
/// Consume tokens into \p Toks until \p T1 is found.
/// Single-kind convenience overload: forwards to the two-kind form with
/// both search kinds set to \p T1.
bool ConsumeAndStoreUntil(tok::TokenKind T1,
                          CachedTokens &Toks,
                          bool StopAtSemi = true,
                          bool ConsumeFinalToken = true) {
  return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
/// A ParsedAttributes list that additionally tracks the source range
/// covered by the attributes.
struct ParsedAttributesWithRange : ParsedAttributes {
  ParsedAttributesWithRange(AttributeFactory &factory)
      : ParsedAttributes(factory) {}

  /// Reset both the attribute list and the tracked source range.
  void clear() {
    Range = SourceRange();
    ParsedAttributes::clear();
  }

  SourceRange Range;
};
/// A ParsedAttributesView that additionally tracks the source range
/// covered by the attributes.
struct ParsedAttributesViewWithRange : ParsedAttributesView {
  ParsedAttributesViewWithRange() : ParsedAttributesView() {}

  /// Reset the list portion and the tracked source range.
  void clearListOnly() {
    Range = SourceRange();
    ParsedAttributesView::clearListOnly();
  }

  SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
/// RAII bookkeeping for parsing an Objective-C implementation: registers
/// itself as the parser's current implementation context and collects
/// method bodies whose parsing is deferred.
struct ObjCImplParsingDataRAII {
  Parser &P;
  Decl *Dcl;
  bool HasCFunction;
  typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
  LateParsedObjCMethodContainer LateParsedObjCMethods;

  ObjCImplParsingDataRAII(Parser &parser, Decl *D)
      : P(parser), Dcl(D), HasCFunction(false), Finished(false) {
    P.CurParsedObjCImpl = this;
  }
  ~ObjCImplParsingDataRAII();

  void finish(SourceRange AtEnd);
  bool isFinished() const { return Finished; }

private:
  bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-C context-sensitive keyword recognition.
// Type-qualifier keywords (see ParseObjCTypeQualifierList above).
enum ObjCTypeQual {
  objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
  objc_nonnull, objc_nullable, objc_null_unspecified,
  objc_NumQuals // count of qualifiers; sizes the ObjCTypeQuals array
};
// One IdentifierInfo per context-sensitive qualifier keyword, indexed by
// ObjCTypeQual.
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
  NotTypeCast = 0, // definitely not a type cast
  MaybeTypeCast,   // might be a type cast
  IsTypeCast       // definitely a type cast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
  AnyCastExpr = 0, // accept any cast-expression
  UnaryExprOnly,   // restrict parsing to unary-expressions
  PrimaryExprOnly  // restrict parsing to primary-expressions
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
/// Overload that looks up the current angle-bracket tracker entry itself;
/// returns false when no potential template name is being tracked.
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
  auto *Info = AngleBrackets.getCurrent(*this);
  if (!Info)
    return false;
  return checkPotentialAngleBracketDelimiter(*Info, OpToken);
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
/// The options are cumulative: each value also permits everything the
/// preceding values permit.
enum ParenParseOption {
  SimpleExpr,      // Only parse '(' expression ')'
  FoldExpr,        // Also allow fold-expression <anything>
  CompoundStmt,    // Also allow '(' compound-statement ')'
  CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
  CastExpr         // Also allow '(' type-name ')' <anything>
};
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer
/// (see ParseLambdaIntroducer's Tentative parameter below).
enum class LambdaIntroducerTentativeParse {
  /// This appears to be a lambda-introducer, which has been fully parsed.
  Success,
  /// This is a lambda-introducer, but has not been fully parsed, and this
  /// function needs to be called again to parse it.
  Incomplete,
  /// This is definitely an Objective-C message send expression, rather than
  /// a lambda-introducer, attribute-specifier, or array designator.
  MessageSend,
  /// This is not a lambda-introducer.
  Invalid,
};
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
///       initializer: [C99 6.7.8]
///         assignment-expression
///         '{' ...
/// Dispatches on whether the initializer starts with a brace.
ExprResult ParseInitializer() {
  if (Tok.is(tok::l_brace))
    return ParseBraceInitializer();
  return ParseAssignmentExpression();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes the behavior that should be taken for an __if_exists
/// block (used by the ParseMicrosoftIfExists* functions below).
enum IfExistsBehavior {
  /// Parse the block; this code is always used.
  IEB_Parse,
  /// Skip the block entirely; this code is never used.
  IEB_Skip,
  /// Parse the block as a dependent block, which may be used in
  /// some template instantiations but not others.
  IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// The location of the initial keyword.
  SourceLocation KeywordLoc;
  /// Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;
  /// Nested-name-specifier preceding the name.
  CXXScopeSpec SS;
  /// The name we're looking for.
  UnqualifiedId Name;
  /// The behavior this __if_exists or __if_not_exists block
  /// should have (parse, skip, or treat as dependent).
  IfExistsBehavior Behavior;
};
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out, there are other significant restrictions on specifiers than
/// would be best implemented in the parser.
/// (Classified by the isTypeSpecifier and isClassTemplateDeductionContext
/// predicates below.)
enum class DeclSpecContext {
  DSC_normal, // normal context
  DSC_class,  // class context, enables 'friend'
  DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
  DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
  DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
  DSC_top_level, // top-level/namespace declaration context
  DSC_template_param, // template parameter context
  DSC_template_type_arg, // template type argument context
  DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
  DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
  case DeclSpecContext::DSC_template_type_arg:
    return true;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;

  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc;
  ExprResult RangeExpr;

  /// True if the ':' of a for-range-declaration was actually seen.
  bool ParsedForRangeDecl() { return ColonLoc.isValid(); }
};
struct ForRangeInfo : ForRangeInit {
  StmtResult LoopVar;
};
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and isn't part of an expression such as a function-style
/// cast. Return false if it's not a decl-specifier, or we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(/*DisambiguatingWithExpression=*/true);
  return isCXXDeclarationSpecifier() == TPResult::True;
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (!getLangOpts().CPlusPlus)
    return isDeclarationSpecifier(/*DisambiguatingWithExpression=*/true);
  return isCXXDeclarationStatement();
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // Let Sema know an OpenMP loop is starting before we disambiguate.
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  return getLangOpts().CPlusPlus
             ? isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true)
             : isDeclarationSpecifier(/*DisambiguatingWithExpression=*/true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,          // inside parens (see isTypeIdInParens)
  TypeIdUnambiguous,       // not assumed parenthesized (see isTypeIdUnambiguously)
  TypeIdAsTemplateArgument // appearing as a template argument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (!getLangOpts().CPlusPlus) {
    // C has no such ambiguity; a specifier-qualifier-list decides it.
    isAmbiguous = false;
    return isTypeSpecifierQualifier();
  }
  return isCXXTypeId(TypeIdInParens, isAmbiguous);
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
/// Checks whether the current tokens form a type-id or an expression.
/// Similar to isTypeIdInParens, but does not assume that the type-id is
/// parenthesized.
bool isTypeIdUnambiguously() {
  if (!getLangOpts().CPlusPlus)
    return isTypeSpecifierQualifier();
  bool DiscardedAmbiguity;
  return isCXXTypeId(TypeIdUnambiguous, DiscardedAmbiguity);
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
/// Convenience overload of isCXXTypeId that discards the ambiguity result.
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool IgnoredAmbiguity;
  return isCXXTypeId(Context, IgnoredAmbiguity);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPR_true if this token starts an expression, \c TPR_false if
/// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
/// tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
/// Check for the start of an attribute-specifier-seq in a context where an
/// attribute is not allowed. The caller must be positioned on '['.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' can start an attribute-specifier, and only when the standard
  // attribute syntax is enabled.
  if (standardAttributesAllowed() && NextToken().is(tok::l_square))
    return DiagnoseProhibitedCXX11Attribute();
  return false;
}
bool DiagnoseProhibitedCXX11Attribute();
/// If the current tokens start a misplaced C++11 attribute ('[[' or
/// 'alignas'), diagnose it and suggest moving it to \p CorrectLocation.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  bool AtDoubleSquare = Tok.is(tok::l_square) && NextToken().is(tok::l_square);
  if (AtDoubleSquare || Tok.is(tok::kw_alignas))
    DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
/// Diagnose and drop attributes that are not permitted here.
/// \param FixItLoc possible correct location for the attributes.
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clear();
  }
}
/// Overload for attribute views; clears only the list of parsed attributes.
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isValid()) {
    DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
    Attrs.clearListOnly();
  }
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we don't support yet, for example,
// attributes that appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If the current token begins a GNU __attribute__ specifier, parse the
/// attributes and attach them to the given declarator.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParsedAttributes Attrs(AttrFactory);
  SourceLocation EndLoc;
  ParseGNUAttributes(Attrs, &EndLoc, LateAttrs, &D);
  D.takeAttributes(Attrs, EndLoc);
}
/// Parse GNU __attribute__ specifiers into \p attrs, if one is present.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.isNot(tok::kw___attribute))
    return;
  ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
/// If a C++11 attribute-specifier-seq starts here, parse it and attach the
/// attributes to the declarator.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  ParsedAttributesWithRange Attrs(AttrFactory);
  SourceLocation EndLoc;
  ParseCXX11Attributes(Attrs, &EndLoc);
  D.takeAttributes(Attrs, EndLoc);
}
/// If a C++11 attribute-specifier-seq starts here, parse it and merge the
/// result into \p attrs.
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (!standardAttributesAllowed() || !isCXX11AttributeSpecifier())
    return;
  // Parse into a range-tracking list first, then transfer ownership.
  ParsedAttributesWithRange AttrsWithRange(AttrFactory);
  ParseCXX11Attributes(AttrsWithRange, endLoc);
  attrs.takeAllFrom(AttrsWithRange);
}
/// If a C++11 attribute-specifier-seq starts here, parse it into \p attrs.
/// \param OuterMightBeMessageSend passed through to the disambiguation so a
///        surrounding Objective-C message send can be considered.
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (!standardAttributesAllowed())
    return;
  if (isCXX11AttributeSpecifier(/*Disambiguate=*/false,
                                OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
/// Parse Microsoft [attribute] blocks when the Microsoft extensions are on.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (!getLangOpts().MicrosoftExt)
    return;
  if (Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
/// Parse __declspec specifiers when the __declspec keyword is enabled.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  if (getLangOpts().DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
/// or higher.
/// \return false if error happens.
// NOTE(review): only LangOpts.OpenCL is checked here; the v2.0 gating
// presumably lives in ParseOpenCLUnrollHintAttribute -- confirm.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
  if (!getLangOpts().OpenCL)
    return true;
  return ParseOpenCLUnrollHintAttribute(Attrs);
}
/// Parses opencl_unroll_hint attribute.
/// \return false if error happens.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
/// Return the virt-specifier kind of the given token.
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
/// Convenience overload that inspects the current token.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;         // Parser whose scope stack is manipulated.
  CXXScopeSpec &SS;  // Scope specifier naming the declarator's context.
  bool EnteredScope; // Set when ActOnCXXEnterDeclaratorScope returned false.
  bool CreatedScope; // Set once P.EnterScope() has been called.
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
  // Enter the scope named by SS; the asserts require a set scope spec and
  // that this is called at most once per object.
  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");
    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.
    // Only when the Sema call returns false do we record that the
    // declarator scope was entered, so the destructor knows to exit it.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }
  // Unwind in reverse order: exit the Sema declarator scope first (if it
  // was entered), then pop the parser scope created above.
  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
/// Parsed pieces of a single declarator of a using-declaration.
struct UsingDeclarator {
  SourceLocation TypenameLoc; // Location of 'typename', if present.
  CXXScopeSpec SS;            // Nested-name-specifier preceding the name.
  UnqualifiedId Name;         // The name being introduced.
  SourceLocation EllipsisLoc; // Location of '...', if present.
  // Reset all fields so the object can be reused for the next declarator.
  void clear() {
    TypenameLoc = EllipsisLoc = SourceLocation();
    SS.clear();
    Name.clear();
  }
};
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parses OpenMP context selectors and calls \p Callback for each
/// successfully parsed context selector.
bool
parseOpenMPContextSelectors(SourceLocation Loc,
SmallVectorImpl<Sema::OMPCtxSelectorData> &Data);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses initializer for provided omp_priv declaration inside the reduction
/// initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
MapTypeModifiersLoc;
bool IsMapTypeImplicit = false;
SourceLocation DepLinMapLastLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
NamedDecl *
ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
//===--------------------------------------------------------------------===//
// Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
4.norace2.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 4
// Deeply nested loop nest over a 9-dimensional array. The loop at depth 7
// (induction variable 'o') is parallelized; every iteration reads and
// writes only its own element A[i][j][k][l][m][n][o][p][q] (a
// self-assignment), so no two threads access the same location and the
// parallel region is data race free (matches the CHECK line below).
int main() {
  int A[N][N][N][N][N][N][N][N][N];
  for (int i = 1; i < N; i++)
    for (int j = 1; j < N; j++)
      for (int k = 1; k < N; k++)
        for (int l = 1; l < N; l++)
          for (int m = 1; m < N; m++)
            for (int n = 1; n < N; n++)
              // Parallelize the 'o' loop; p and q run sequentially per thread.
              #pragma omp parallel for
              for (int o = 1; o < N; o++)
                for (int p = 1; p < N; p++)
                  for (int q = 1; q < N; q++)
                    // Self-assignment: each (o,p,q) touches a distinct element.
                    A[i][j][k][l][m][n][o][p][q] = A[i][j][k][l][m][n][o][p][q];
}
// CHECK: Region is Data Race Free.
// END
|
OMPIRBuilder.h | //===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
#include <utility>
namespace llvm {
class CanonicalLoopInfo;
/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
/// Create a new OpenMPIRBuilder operating on the given module \p M. This will
/// not have an effect on \p M (see initialize).
OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}
~OpenMPIRBuilder();
/// Initialize the internal state, this will put structures types and
/// potentially other helpers into the underlying module. Must be called
/// before any other method and only once!
void initialize();
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
void finalize(Function *Fn = nullptr);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
/// Type used throughout for insertion points.
using InsertPointTy = IRBuilder<>::InsertPoint;
/// Callback type for variable finalization (think destructors).
///
/// \param CodeGenIP is the insertion point at which the finalization code
/// should be placed.
///
/// A finalize callback knows about all objects that need finalization, e.g.
/// destruction, when the scope of the currently generated construct is left
/// at the time, and location, the callback is invoked.
using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
/// Bookkeeping for one in-flight region that may require finalization
/// (e.g., destructor-like cleanup) when control leaves it. Entries are
/// kept on FinalizationStack, innermost region last.
struct FinalizationInfo {
  /// The finalization callback provided by the last in-flight invocation of
  /// createXXXX for the directive of kind DK.
  FinalizeCallbackTy FiniCB;
  /// The directive kind of the innermost directive that has an associated
  /// region which might require finalization when it is left.
  omp::Directive DK;
  /// Flag to indicate if the directive is cancellable.
  bool IsCancellable;
};
/// Push a finalization callback on the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
/// Push the finalization descriptor \p FI onto the finalization stack so it
/// becomes the innermost in-flight region.
void pushFinalizationCB(const FinalizationInfo &FI) {
  FinalizationStack.emplace_back(FI);
}
/// Pop the last finalization callback from the finalization stack.
///
/// Must pair with a preceding pushFinalizationCB; the stack must be
/// non-empty when this is called.
///
/// NOTE: Temporary solution until Clang CG is gone.
void popFinalizationCB() { FinalizationStack.pop_back(); }
/// Callback type for body (=inner region) code generation
///
/// The callback takes code locations as arguments, each describing a
/// location at which code might need to be generated or a location that is
/// the target of control transfer.
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the body code should be
/// placed.
/// \param ContinuationBB is the basic block target to leave the body.
///
/// Note that all blocks pointed to by the arguments have terminators.
using BodyGenCallbackTy =
function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
// This is created primarily for the sections construct, as llvm::function_ref
// (BodyGenCallbackTy) is not storable (as described in the comments of the
// function_ref class): function_ref contains a non-owning reference to the
// callable.
using StorableBodyGenCallbackTy =
std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
/// Callback type for loop body code generation.
///
/// \param CodeGenIP is the insertion point where the loop's body code must be
/// placed. This will be a dedicated BasicBlock with a
/// conditional branch from the loop condition check and
/// terminated with an unconditional branch to the loop
/// latch.
/// \param IndVar is the induction variable usable at the insertion point.
using LoopBodyGenCallbackTy =
function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
/// Callback type for variable privatization (think copy & default
/// constructor).
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the privatization code
/// should be placed.
/// \param Original The value being copied/created, should not be used in the
/// generated IR.
/// \param Inner The equivalent of \p Original that should be used in the
/// generated IR; this is equal to \p Original if the value is
/// a pointer and can thus be passed directly, otherwise it is
/// an equivalent but different value.
/// \param ReplVal The replacement value, thus a copy or new created version
/// of \p Inner.
///
/// \returns The new insertion point where code generation continues and
/// \p ReplVal the replacement value.
using PrivatizeCallbackTy = function_ref<InsertPointTy(
InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
Value &Inner, Value *&ReplVal)>;
/// Description of a LLVM-IR insertion point (IP) and a debug/source location
/// (filename, line, column, ...).
struct LocationDescription {
  /// Capture the builder's current insertion point and debug location.
  LocationDescription(const IRBuilderBase &IRB)
      : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
  /// Use \p IP with a default-constructed (empty) debug location.
  LocationDescription(const InsertPointTy &IP) : IP(IP) {}
  LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
      : IP(IP), DL(DL) {}
  /// Where new instructions should be inserted.
  InsertPointTy IP;
  /// The debug location to attach to instructions emitted at \p IP.
  DebugLoc DL;
};
/// Emitter methods for OpenMP directives.
///
///{
/// Generator for '#omp barrier'
///
/// \param Loc The location where the barrier directive was encountered.
/// \param DK The kind of directive that caused the barrier.
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
bool ForceSimpleCall = false,
bool CheckCancelFlag = true);
/// Generator for '#omp cancel'
///
/// \param Loc The location where the directive was encountered.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param CanceledDirective The kind of directive that is canceled.
///
/// \returns The insertion point after the cancellation code.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
omp::Directive CanceledDirective);
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param BodyGenCB Callback that will generate the region code.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param NumThreads The evaluated 'num_threads' clause expression, if any.
/// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
/// \param IsCancellable Flag to indicate a cancellable parallel region.
///
/// \returns The insertion position *after* the parallel.
IRBuilder<>::InsertPoint
createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition,
Value *NumThreads, omp::ProcBindKind ProcBind,
bool IsCancellable);
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// This generator operates on the logical iteration space of the loop, i.e.
/// the caller only has to provide a loop trip count of the loop as defined by
/// base language semantics. The trip count is interpreted as an unsigned
/// integer. The induction variable passed to \p BodyGenCB will be of the same
/// type and run from 0 to \p TripCount - 1. It is up to the callback to
/// convert the logical iteration variable to the loop counter variable in the
/// loop body.
///
/// \param Loc The insert and source location description. The insert
/// location can be between two instructions or the end of a
/// degenerate block (e.g. a BB under construction).
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param TripCount Number of iterations the loop body is executed.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *TripCount,
const Twine &Name = "loop");
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// Instead of a logical iteration space, this allows specifying user-defined
/// loop counter values using increment, upper- and lower bounds. To
/// disambiguate the terminology when counting downwards, instead of lower
/// bounds we use \p Start for the loop counter value in the first body
/// iteration.
///
/// Consider the following limitations:
///
/// * A loop counter space over all integer values of its bit-width cannot be
/// represented. E.g. using uint8_t, its loop trip count of 256 cannot be
/// stored into an 8-bit integer:
///
/// DO I = 0, 255, 1
///
/// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
/// effectively counting downwards:
///
/// for (uint8_t i = 100u; i > 0; i += 127u)
///
///
/// TODO: May need to add additional parameters to represent:
///
/// * Allow representing downcounting with unsigned integers.
///
/// * Sign of the step and the comparison operator might disagree:
///
/// for (int i = 0; i < 42; i -= 1u)
///
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param Start Value of the loop counter for the first iterations.
/// \param Stop Loop counter values past this will stop the loop.
/// \param Step Loop counter increment after each iteration; negative
/// means counting down.
/// \param IsSigned Whether Start, Stop and Step are signed integers.
/// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
/// counter.
/// \param ComputeIP Insertion point for instructions computing the trip
/// count. Can be used to ensure the trip count is available
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop,
InsertPointTy ComputeIP = {},
const Twine &Name = "loop");
/// Collapse a loop nest into a single loop.
///
/// Merges loops of a loop nest into a single CanonicalLoopNest representation
/// that has the same number of innermost loop iterations as the origin loop
/// nest. The induction variables of the input loops are derived from the
/// collapsed loop's induction variable. This is intended to be used to
/// implement OpenMP's collapse clause. Before applying a directive,
/// collapseLoops normalizes a loop nest to contain only a single loop and the
/// directive's implementation does not need to handle multiple loops itself.
/// This does not remove the need to handle all loop nest handling by
/// directives, such as the ordered(<n>) clause or the simd schedule-clause
/// modifier of the worksharing-loop directive.
///
/// Example:
/// \code
/// for (int i = 0; i < 7; ++i) // Canonical loop "i"
/// for (int j = 0; j < 9; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After collapsing with Loops={i,j}, the loop is changed to
/// \code
/// for (int ij = 0; ij < 63; ++ij) {
/// int i = ij / 9;
/// int j = ij % 9;
/// body(i, j);
/// }
/// \endcode
///
/// In the current implementation, the following limitations apply:
///
/// * All input loops have an induction variable of the same type.
///
/// * The collapsed loop will have the same trip count integer type as the
/// input loops. Therefore it is possible that the collapsed loop cannot
/// represent all iterations of the input loops. For instance, assuming a
/// 32 bit integer type, and two input loops both iterating 2^16 times, the
/// theoretical trip count of the collapsed loop would be 2^32 iterations,
/// which cannot be represented in a 32-bit integer. Behavior is undefined
/// in this case.
///
/// * The trip counts of every input loop must be available at \p ComputeIP.
/// Non-rectangular loops are not yet supported.
///
/// * At each nest level, code between a surrounding loop and its nested loop
/// is hoisted into the loop body, and such code will be executed more
/// often than before collapsing (or not at all if any inner loop iteration
/// has a trip count of 0). This is permitted by the OpenMP specification.
///
/// \param DL Debug location for instructions added for collapsing,
/// such as instructions to compute/derive the input loop's
/// induction variables.
/// \param Loops Loops in the loop nest to collapse. Loops are specified
/// from outermost-to-innermost and every control flow of a
/// loop's body must pass through its directly nested loop.
/// \param ComputeIP Where additional instruction that compute the collapsed
/// trip count. If not set, defaults to before the generated
/// loop.
///
/// \returns The CanonicalLoopInfo object representing the collapsed loop.
CanonicalLoopInfo *collapseLoops(DebugLoc DL,
ArrayRef<CanonicalLoopInfo *> Loops,
InsertPointTy ComputeIP);
/// Modifies the canonical loop to be a statically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// TODO: Workshare loops with static scheduling may contain up to two loops
/// that fulfill the requirements of an OpenMP canonical loop. One for
/// iterating over all iterations of a chunk and another one for iterating
/// over all chunks that are executed on the same thread. Returning
/// CanonicalLoopInfo objects representing them may eventually be useful for
/// the apply clause planned in OpenMP 6.0, but currently whether these are
/// canonical loops is irrelevant.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain, and then in each iteration
/// to update the loop counter.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
omp::OMPScheduleType SchedType,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop and calls to an OpenMP runtime finalization function after the loop.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP, bool NeedsBarrier);
/// Tile a loop nest.
///
/// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
/// \p Loops must be perfectly nested, from outermost to innermost loop
/// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
/// of every loop and every tile sizes must be usable in the outermost
/// loop's preheader. This implies that the loop nest is rectangular.
///
/// Example:
/// \code
/// for (int i = 0; i < 15; ++i) // Canonical loop "i"
/// for (int j = 0; j < 14; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
/// \code
/// for (int i1 = 0; i1 < 3; ++i1)
/// for (int j1 = 0; j1 < 2; ++j1)
/// for (int i2 = 0; i2 < 5; ++i2)
/// for (int j2 = 0; j2 < 7; ++j2)
/// body(i1*5+i2, j1*7+j2);
/// \endcode
///
/// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are
/// referred to the floor, and the loops i2 and j2 are the tiles. Tiling also
/// handles non-constant trip counts, non-constant tile sizes and trip counts
/// that are not multiples of the tile size. In the latter case the tile loop
/// of the last floor-loop iteration will have fewer iterations than specified
/// as its tile size.
///
///
/// @param DL Debug location for instructions added by tiling, for
/// instance the floor- and tile trip count computation.
/// @param Loops Loops to tile. The CanonicalLoopInfo objects are
/// invalidated by this method, i.e. should not be used after
/// tiling.
/// @param TileSizes For each loop in \p Loops, the tile size for that
/// dimensions.
///
/// \returns A list of generated loops. Contains twice as many loops as the
/// input loop nest; the first half are the floor loops and the
/// second half are the tile loops.
std::vector<CanonicalLoopInfo *>
tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
ArrayRef<Value *> TileSizes);
/// Fully unroll a loop.
///
/// Instead of unrolling the loop immediately (and duplicating its body
/// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
/// metadata.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Fully or partially unroll a loop. How the loop is unrolled is determined
/// using LLVM's LoopUnrollPass.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Partially unroll a loop.
///
/// The CanonicalLoopInfo of the unrolled loop for use with chained
/// loop-associated directive can be requested using \p UnrolledCLI. Not
/// needing the CanonicalLoopInfo allows more efficient code generation by
/// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
/// A loop-associated directive applied to the unrolled loop needs to know the
/// new trip count which means that if using a heuristically determined unroll
/// factor (\p Factor == 0), that factor must be computed immediately. We are
/// using the same logic as the LoopUnrollPass to derived the unroll factor,
/// but which assumes that some canonicalization has taken place (e.g.
/// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
/// better when the unrolled loop's CanonicalLoopInfo is not needed.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
/// \param Factor The factor to unroll the loop by. A factor of 0
/// indicates that a heuristic should be used to determine
/// the unroll-factor.
/// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
/// partially unrolled loop. Otherwise, uses loop metadata
/// to defer unrolling to the LoopUnrollPass.
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
CanonicalLoopInfo **UnrolledCLI);
/// Add metadata to simd-ize a loop.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to simd-ize.
void applySimd(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
void createFlush(const LocationDescription &Loc);
/// Generator for '#omp taskwait'
///
/// \param Loc The location where the taskwait directive was encountered.
void createTaskwait(const LocationDescription &Loc);
/// Generator for '#omp taskyield'
///
/// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc);
/// Functions used to generate reductions. Such functions take two Values
/// representing LHS and RHS of the reduction, respectively, and a reference
/// to the value that is updated to refer to the reduction result.
using ReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;
/// Functions used to generate atomic reductions. Such functions take two
/// Values representing pointers to LHS and RHS of the reduction, as well as
/// the element type of these pointers. They are expected to atomically
/// update the LHS to the reduced value.
using AtomicReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>;
/// Information about an OpenMP reduction.
struct ReductionInfo {
  /// \param ElementType Type of the reduced value; must match the pointee
  ///        type of \p Variable (enforced by the assertion below).
  ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable,
                ReductionGenTy ReductionGen,
                AtomicReductionGenTy AtomicReductionGen)
      : ElementType(ElementType), Variable(Variable),
        PrivateVariable(PrivateVariable), ReductionGen(ReductionGen),
        AtomicReductionGen(AtomicReductionGen) {
    assert(cast<PointerType>(Variable->getType())
        ->isOpaqueOrPointeeTypeMatches(ElementType) && "Invalid elem type");
  }
  /// Reduction element type, must match pointee type of variable.
  Type *ElementType;
  /// Reduction variable of pointer type.
  Value *Variable;
  /// Thread-private partial reduction variable.
  Value *PrivateVariable;
  /// Callback for generating the reduction body. The IR produced by this will
  /// be used to combine two values in a thread-safe context, e.g., under
  /// lock or within the same thread, and therefore need not be atomic.
  ReductionGenTy ReductionGen;
  /// Callback for generating the atomic reduction body, may be null. The IR
  /// produced by this will be used to atomically combine two values during
  /// reduction. If null, the implementation will use the non-atomic version
  /// along with the appropriate synchronization mechanisms.
  AtomicReductionGenTy AtomicReductionGen;
};
// TODO: provide atomic and non-atomic reduction generators for reduction
// operators defined by the OpenMP specification.
/// Generator for '#omp reduction'.
///
/// Emits the IR instructing the runtime to perform the specific kind of
/// reductions. Expects reduction variables to have been privatized and
/// initialized to reduction-neutral values separately. Emits the calls to
/// runtime functions as well as the reduction function and the basic blocks
/// performing the reduction atomically and non-atomically.
///
/// The code emitted for the following:
///
/// \code
/// type var_1;
/// type var_2;
/// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
/// /* body */;
/// \endcode
///
/// corresponds to the following sketch.
///
/// \code
/// void _outlined_par() {
/// // N is the number of different reductions.
/// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
/// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
/// _omp_reduction_func,
/// _gomp_critical_user.reduction.var)) {
/// case 1: {
/// var_1 = var_1 <reduction-op> privatized_var_1;
/// var_2 = var_2 <reduction-op> privatized_var_2;
/// // ...
/// __kmpc_end_reduce(...);
/// break;
/// }
/// case 2: {
/// _Atomic<ReductionOp>(var_1, privatized_var_1);
/// _Atomic<ReductionOp>(var_2, privatized_var_2);
/// // ...
/// break;
/// }
/// default: break;
/// }
/// }
///
/// void _omp_reduction_func(void **lhs, void **rhs) {
/// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
/// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
/// // ...
/// }
/// \endcode
///
/// \param Loc The location where the reduction was
/// encountered. Must be within the associate
/// directive and after the last local access to the
/// reduction variables.
/// \param AllocaIP An insertion point suitable for allocas usable
/// in reductions.
/// \param ReductionInfos A list of info on each reduction variable.
/// \param IsNoWait A flag set if the reduction is marked as nowait.
InsertPointTy createReductions(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<ReductionInfo> ReductionInfos,
bool IsNoWait = false);
///}
/// Return the insertion point used by the underlying IRBuilder, as a
/// restorable snapshot (see IRBuilderBase::saveIP); later changes to the
/// builder are not reflected in the returned value.
InsertPointTy getInsertionPoint() { return Builder.saveIP(); }
/// Move the underlying builder to \p Loc: restore its insertion point and
/// set the current debug location.
/// \returns false if \p Loc describes a degenerate location whose insertion
///          point has no block (callers should then emit no code).
bool updateToLocation(const LocationDescription &Loc) {
  const bool HasBlock = Loc.IP.getBlock() != nullptr;
  Builder.restoreIP(Loc.IP);
  Builder.SetCurrentDebugLocation(Loc.DL);
  return HasBlock;
}
/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
omp::RuntimeFunction FnID);
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
unsigned Line, unsigned Column,
uint32_t &SrcLocStrSize);
/// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
/// fallback if \p DL does not specify the function name.
Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize,
Function *F = nullptr);
/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc,
uint32_t &SrcLocStrSize);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create a enum class for the Reserve2Flags
Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize,
omp::IdentFlag Flags = omp::IdentFlag(0),
unsigned Reserve2Flags = 0);
/// Create a hidden global flag \p Name in the module with initial value \p
/// Value.
GlobalValue *createGlobalFlag(unsigned Value, StringRef Name);
/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is canceled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
omp::Directive DK, bool ForceSimpleCall,
bool CheckCancelFlag);
/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);
/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that reference also the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;
/// Return true if the last entry in the finalization stack is a cancellable
/// region of directive kind \p DK.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
  if (FinalizationStack.empty())
    return false;
  const FinalizationInfo &Last = FinalizationStack.back();
  return Last.IsCancellable && Last.DK == DK;
}
/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);
/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);
/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);
/// The underlying LLVM-IR module.
Module &M;
/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;
/// Map to remember source location strings.
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
using PostOutlineCBTy = std::function<void(Function &)>;
/// Callback invoked after the region has been outlined.
PostOutlineCBTy PostOutlineCB;
/// First and last basic block of the region to be outlined.
BasicBlock *EntryBB, *ExitBB;
/// Values to exclude from the outlined function's argument aggregate.
SmallVector<Value *, 2> ExcludeArgsFromAggregate;
/// Collect all blocks in between EntryBB and ExitBB in both the given
/// vector and set.
void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
SmallVectorImpl<BasicBlock *> &BlockVector);
/// Return the function that contains the region to be outlined.
Function *getFunction() const { return EntryBB->getParent(); }
};
/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;
/// Collection of owned canonical loop objects that eventually need to be
/// freed.
std::forward_list<CanonicalLoopInfo> LoopInfos;
/// Add a new region that will be outlined later.
///
/// \param OI Description of the region; it is moved from, leaving the
///           caller's object in a valid but unspecified state.
void addOutlineInfo(OutlineInfo &&OI) {
  // \p OI is a named rvalue reference, i.e. an lvalue inside this function;
  // without std::move the SmallVector element would be copy-constructed,
  // copying the contained std::function and vectors and defeating the
  // purpose of the && interface.
  OutlineInfos.emplace_back(std::move(OI));
}
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;
/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
std::string VarName);
/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName);
/// Allocas set up by createMapperAllocas and consumed by emitMapperCall.
struct MapperAllocas {
AllocaInst *ArgsBase = nullptr;
AllocaInst *Args = nullptr;
AllocaInst *ArgSizes = nullptr;
};
/// Create the alloca instructions used in the call to mapper functions.
void createMapperAllocas(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumOperands,
struct MapperAllocas &MapperAllocas);
/// Create the call for the target mapper function.
/// \param Loc The source location description.
/// \param MapperFunc Function to be called.
/// \param SrcLocInfo Source location information global.
/// \param MaptypesArg The argument types.
/// \param MapnamesArg The argument names.
/// \param MapperAllocas The AllocaInst used for the call.
/// \param DeviceID Device ID for the call.
/// \param NumOperands Number of operands in the call.
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
struct MapperAllocas &MapperAllocas, int64_t DeviceID,
unsigned NumOperands);
public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn function to call for copying data.
/// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
llvm::Value *BufSize, llvm::Value *CpyBuf,
llvm::Value *CpyFn, llvm::Value *DidIt);
/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, llvm::Value *DidIt);
/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param Filter Filter value of the masked construct.
///
/// \returns The insertion position *after* the masked.
InsertPointTy createMasked(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, Value *Filter);
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName name of the lock used by the critical directive
/// \param HintInst Hint Instruction for hint clause associated with critical
///
/// \returns The insertion position *after* the critical.
InsertPointTy createCritical(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
StringRef CriticalName, Value *HintInst);
/// Generator for '#omp ordered depend (source | sink)'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param NumLoops The number of loops in depend clause.
/// \param StoreValues The value will be stored in vector address.
/// \param Name The name of alloca instruction.
/// \param IsDependSource If true, depend source; otherwise, depend sink.
///
/// \return The insertion position *after* the ordered.
InsertPointTy createOrderedDepend(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumLoops,
ArrayRef<llvm::Value *> StoreValues,
const Twine &Name, bool IsDependSource);
/// Generator for '#omp ordered [threads | simd]'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param IsThreads If true, with threads clause or without clause;
/// otherwise, with simd clause;
///
/// \returns The insertion position *after* the ordered.
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
bool IsThreads);
/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable parallel region.
/// \param IsNowait If true, the barrier ensuring all sections are executed
/// before moving forward will not be generated.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, bool IsCancellable,
bool IsNowait);
/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generate conditional branch and relevant BasicBlocks through which private
/// threads copy the 'copyin' variables from Master copy to threadprivate
/// copies.
///
/// \param IP insertion block for copyin conditional
/// \param MasterAddr a pointer to the master variable
/// \param PrivateAddr a pointer to the threadprivate variable
/// \param IntPtrTy Pointer size type
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
/// and copy.in.end block
///
/// \returns The insertion point where copying operation to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
Value *PrivateAddr,
llvm::IntegerType *IntPtrTy,
bool BranchtoEnd = true);
/// Create a runtime call for kmpc_Alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of call Instruction for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of call Instruction for callinst
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
llvm::Value *Pointer,
llvm::ConstantInt *Size,
const llvm::Twine &Name = Twine(""));
/// Create a runtime call for __tgt_interop_init
///
/// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated
/// \param InteropType type of interop operation
/// \param Device device to which offloading will occur
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_init call
CallInst *createOMPInteropInit(const LocationDescription &Loc,
Value *InteropVar,
omp::OMPInteropType InteropType, Value *Device,
Value *NumDependences,
Value *DependenceAddress,
bool HaveNowaitClause);
/// Create a runtime call for __tgt_interop_destroy
///
/// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated
/// \param Device device to which offloading will occur
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_destroy call
CallInst *createOMPInteropDestroy(const LocationDescription &Loc,
Value *InteropVar, Value *Device,
Value *NumDependences,
Value *DependenceAddress,
bool HaveNowaitClause);
/// Create a runtime call for __tgt_interop_use
///
/// \param Loc The insert and source location description.
/// \param InteropVar variable to be allocated
/// \param Device device to which offloading will occur
/// \param NumDependences number of dependence variables
/// \param DependenceAddress pointer to dependence variables
/// \param HaveNowaitClause does nowait clause exist
///
/// \returns CallInst to the __tgt_interop_use call
CallInst *createOMPInteropUse(const LocationDescription &Loc,
Value *InteropVar, Value *Device,
Value *NumDependences, Value *DependenceAddress,
bool HaveNowaitClause);
/// The `omp target` interface
///
/// For more information about the usage of this interface,
/// \see openmp/libomptarget/deviceRTLs/common/include/target.h
///
///{
/// Create a runtime call for kmpc_target_init
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
/// Create a runtime call for kmpc_target_deinit
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
///}
/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations, the initializeTypes function will provide the
/// values.
///
/// Note: All of these members start out as nullptr and are only populated
/// once initializeTypes has run.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
ArrayType *VarName##Ty = nullptr; \
PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
FunctionType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
StructType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
///}
private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);
/// Common interface for generating entry calls for OMP Directives.
/// If the directive has a region/body, it will set the insertion
/// point to the body.
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
BasicBlock *ExitBB,
bool Conditional = false);
/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
InsertPointTy FinIP,
Instruction *ExitCall,
bool HasFinalize = true);
/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional indicate if the entry call result will be used
/// to evaluate a conditional of whether a thread will execute
/// body code or not.
/// \param HasFinalize indicate if the directive will require finalization
/// and has a finalization callback in the stack that
/// should be called.
/// \param IsCancellable if HasFinalize is set to true, indicate if the
/// directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy
EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool Conditional = false,
bool HasFinalize = true, bool IsCancellable = false);
/// Get the platform-specific name separator.
/// \param Parts different parts of the final name that need separation
/// \param FirstSeparator First separator used between the initial two
/// parts of the name.
/// \param Separator separator used between all of the rest consecutive
/// parts of the name
static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
StringRef FirstSeparator,
StringRef Separator);
/// Gets (if a variable with the given name already exists) or creates an
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it already exists, the type
/// must be the same.
/// \param Name Name of the variable.
Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
unsigned AddressSpace = 0);
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
/// Callback type for Atomic Expression update
/// ex:
/// \code{.cpp}
/// unsigned x = 0;
/// #pragma omp atomic update
/// x = Expr(x_old); //Expr() is any legal operation
/// \endcode
///
/// \param XOld the value of the atomic memory address to use for update
/// \param IRB reference to the IRBuilder to use
///
/// \returns Value to update X to.
using AtomicUpdateCallbackTy =
const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
private:
/// The kinds of '#omp atomic' constructs handled by the helpers below.
enum AtomicKind { Read, Write, Update, Capture, Compare };
/// Determine whether to emit flush or not
///
/// \param Loc The insert and source location description.
/// \param AO The required atomic ordering
/// \param AK The OpenMP atomic operation kind used.
///
/// \returns whether a flush was emitted or not
bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
AtomicOrdering AO, AtomicKind AK);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param AllocaIP The insertion point to be used for alloca
/// instructions.
/// \param X The target atomic pointer to be updated
/// \param XElemTy The element type of the atomic pointer.
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
/// \param RMWOp The binary operation used for update. If the
/// operation is not supported by atomicRMW,
/// or belongs to {FADD, FSUB, BAD_BINOP},
/// then a `cmpExch` based atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX true if \a X is volatile.
/// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
std::pair<Value *, Value *>
emitAtomicUpdate(InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool VolatileX,
bool IsXBinopExpr);
/// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 .
///
/// \return The instruction
Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
AtomicRMWInst::BinOp RMWOp);
public:
/// A struct to pack relevant information while generating atomic Ops
struct AtomicOpValue {
/// Pointer to the variable the atomic operation acts on.
Value *Var = nullptr;
/// Element type of the value behind \p Var.
Type *ElemTy = nullptr;
/// Whether the value is to be treated as signed.
bool IsSigned = false;
/// Whether accesses to \p Var are volatile.
bool IsVolatile = false;
};
/// Emit atomic Read for : V = X --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically read
/// \param V Memory address where to store atomically read
/// value
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
AtomicOpValue &X, AtomicOpValue &V,
AtomicOrdering AO);
/// Emit atomic write for : X = Expr --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
AtomicOpValue &X, Value *Expr,
AtomicOrdering AO);
/// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param RMWOp The binary operation used for update. If operation
/// is not supported by atomicRMW, or belongs to
/// {FADD, FSUB, BAD_BINOP}, then a `cmpExch` based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
InsertPointTy AllocaIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool IsXBinopExpr);
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param X The target atomic pointer to be updated
/// \param V Memory address where to store captured value
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions
/// \param RMWOp The binary operation used for update. If the
/// operation is not supported by atomicRMW, or belongs to
/// {FADD, FSUB, BAD_BINOP}, then a cmpExch based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
/// \param IsXBinopExpr true if X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy
createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP,
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
bool IsPostfixUpdate, bool IsXBinopExpr);
/// Emit atomic compare for constructs: --- Only scalar data types
/// cond-update-atomic:
/// x = x ordop expr ? expr : x;
/// x = expr ordop x ? expr : x;
/// x = x == e ? d : x;
/// x = e == x ? d : x; (this one is not in the spec)
/// cond-update-stmt:
/// if (x ordop expr) { x = expr; }
/// if (expr ordop x) { x = expr; }
/// if (x == e) { x = d; }
/// if (e == x) { x = d; } (this one is not in the spec)
///
/// \param Loc The insert and source location description.
/// \param X The target atomic pointer to be updated.
/// \param E The expected value ('e') for forms that use an
/// equality comparison or an expression ('expr') for
/// forms that use 'ordop' (logically an atomic maximum or
/// minimum).
/// \param D The desired value for forms that use an equality
/// comparison. For forms that use 'ordop', it should be
/// \p nullptr.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param Op Atomic compare operation. It can only be ==, <, or >.
/// \param IsXBinopExpr True if the conditional statement is in the form where
/// x is on LHS. It only matters for < or >.
///
/// \return Insertion point after generated atomic compare IR.
InsertPointTy createAtomicCompare(const LocationDescription &Loc,
AtomicOpValue &X, Value *E, Value *D,
AtomicOrdering AO,
omp::OMPAtomicCompareOp Op,
bool IsXBinopExpr);
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F Function in which to insert the BasicBlocks.
/// \param PreInsertBefore Where to insert BBs that execute before the body,
/// typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name Base name used to derive BB
/// and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
Function *F,
BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore,
const Twine &Name = {});
};
/// Class to represented the control flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
/// * The number of loop iterations must have been computed before entering the
/// loop.
///
/// * Has an (unsigned) logical induction variable that starts at zero and
/// increments by one.
///
/// * The loop's CFG itself has no side-effects. The OpenMP specification
/// itself allows side-effects, but the order in which they happen, including
/// how often or whether at all, is unspecified. We expect that the frontend
/// will emit those side-effect instructions somewhere (e.g. before the loop)
/// such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
/// Preheader
/// |
/// /-> Header
/// | |
/// | Cond---\
/// | | |
/// | Body |
/// | | | |
/// | <...> |
/// | | | |
/// \--Latch |
/// |
/// Exit
/// |
/// After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier to not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to saying that the Latch has no PHINode and the Header's only PHINode is
/// for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK(). They are expected to not be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo usually using
/// createLoopSkeleton and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfo and return it as representing the
/// modified loop. What is done is an implementation detail of
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfo have the same structure and guarantees as the one
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, After or Block can still be used after
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
class CanonicalLoopInfo {
friend class OpenMPIRBuilder;
private:
/// Entry block of each iteration; in canonical form it only contains the
/// induction variable's PHINode (see getHeader()).
BasicBlock *Header = nullptr;
/// Block computing whether there is another iteration (see getCond()).
BasicBlock *Cond = nullptr;
/// Block marking the end of the loop body code; in canonical form it only
/// contains the induction variable increment (see getLatch()).
BasicBlock *Latch = nullptr;
/// Block reached when no more iterations are executed (see getExit()).
BasicBlock *Exit = nullptr;
/// Add the control blocks of this loop to \p BBs.
///
/// This does not include any block from the body, including the one returned
/// by getBody().
///
/// FIXME: This currently includes the Preheader and After blocks even though
/// their content is (mostly) not under CanonicalLoopInfo's control.
/// Re-evaluate whether this makes sense.
void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
public:
/// Returns whether this object currently represents the IR of a loop. If
/// returning false, it may have been consumed by a loop transformation or not
/// been intialized. Do not use in this case;
bool isValid() const { return Header; }
/// The preheader ensures that there is only a single edge entering the loop.
/// Code that must be execute before any loop iteration can be emitted here,
/// such as computing the loop trip count and begin lifetime markers. Code in
/// the preheader is not considered part of the canonical loop.
BasicBlock *getPreheader() const;
/// The header is the entry for each iteration. In the canonical control flow,
/// it only contains the PHINode for the induction variable.
BasicBlock *getHeader() const {
assert(isValid() && "Requires a valid canonical loop");
return Header;
}
/// The condition block computes whether there is another loop iteration. If
/// yes, branches to the body; otherwise to the exit block.
BasicBlock *getCond() const {
assert(isValid() && "Requires a valid canonical loop");
return Cond;
}
/// The body block is the single entry for a loop iteration and not controlled
/// by CanonicalLoopInfo. It can contain arbitrary control flow but must
/// eventually branch to the \p Latch block.
BasicBlock *getBody() const {
assert(isValid() && "Requires a valid canonical loop");
return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0);
}
/// Reaching the latch indicates the end of the loop body code. In the
/// canonical control flow, it only contains the increment of the induction
/// variable.
BasicBlock *getLatch() const {
assert(isValid() && "Requires a valid canonical loop");
return Latch;
}
/// Reaching the exit indicates no more iterations are being executed.
BasicBlock *getExit() const {
assert(isValid() && "Requires a valid canonical loop");
return Exit;
}
/// The after block is intended for clean-up code such as lifetime end
/// markers. It is separate from the exit block to ensure, analogous to the
/// preheader, it having just a single entry edge and being free from PHI
/// nodes should there be multiple loop exits (such as from break
/// statements/cancellations).
BasicBlock *getAfter() const {
assert(isValid() && "Requires a valid canonical loop");
return Exit->getSingleSuccessor();
}
/// Returns the llvm::Value containing the number of loop iterations. It must
/// be valid in the preheader and always interpreted as an unsigned integer of
/// any bit-width.
Value *getTripCount() const {
assert(isValid() && "Requires a valid canonical loop");
Instruction *CmpI = &Cond->front();
assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
return CmpI->getOperand(1);
}
/// Returns the instruction representing the current logical induction
/// variable. Always unsigned, always starting at 0 with an increment of one.
Instruction *getIndVar() const {
assert(isValid() && "Requires a valid canonical loop");
Instruction *IndVarPHI = &Header->front();
assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
return IndVarPHI;
}
/// Return the type of the induction variable (and the trip count).
Type *getIndVarType() const {
assert(isValid() && "Requires a valid canonical loop");
return getIndVar()->getType();
}
/// Return the insertion point for user code before the loop.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
assert(isValid() && "Requires a valid canonical loop");
BasicBlock *Preheader = getPreheader();
return {Preheader, std::prev(Preheader->end())};
};
/// Return the insertion point for user code in the body.
OpenMPIRBuilder::InsertPointTy getBodyIP() const {
assert(isValid() && "Requires a valid canonical loop");
BasicBlock *Body = getBody();
return {Body, Body->begin()};
};
/// Return the insertion point for user code after the loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const {
assert(isValid() && "Requires a valid canonical loop");
BasicBlock *After = getAfter();
return {After, After->begin()};
};
Function *getFunction() const {
assert(isValid() && "Requires a valid canonical loop");
return Header->getParent();
}
/// Consistency self-check.
void assertOK() const;
/// Invalidate this loop. That is, the underlying IR does not fulfill the
/// requirements of an OpenMP canonical loop anymore.
void invalidate();
};
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
|
GB_binop__cmplx_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__cmplx_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__cmplx_fp32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__cmplx_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__cmplx_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__cmplx_fp32)
// C=scalar+B GB (_bind1st__cmplx_fp32)
// C=scalar+B' GB (_bind1st_tran__cmplx_fp32)
// C=A+scalar GB (_bind2nd__cmplx_fp32)
// C=A'+scalar GB (_bind2nd_tran__cmplx_fp32)
// C type: GxB_FC32_t
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = GxB_CMPLXF (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
GxB_FC32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GxB_CMPLXF (GBX (Ax, pA, A_iso), 0)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GxB_CMPLXF (GBX (Bx, pB, B_iso), 0)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GxB_CMPLXF (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_CMPLX || GxB_NO_FP32 || GxB_NO_CMPLX_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator. The computation
// is performed by the included template using the GB_* macros defined above
// (cij = GxB_CMPLXF (aij, bij)), split across nthreads threads.
void GB (_Cdense_ewise3_noaccum__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// NOTE: the subassign kernel below is compiled out (#if 0) for this
// operator, so when enabled (GB_DISABLE false) this function performs no
// work and simply reports GrB_SUCCESS.
GrB_Info GB (_Cdense_accumB__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// kernel intentionally disabled for this operator
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// NOTE: as with _Cdense_accumB above, the kernel is compiled out (#if 0) for
// this operator; when enabled this function is a no-op returning GrB_SUCCESS.
GrB_Info GB (_Cdense_accumb__cmplx_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// kernel intentionally disabled for this operator
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *restrict Cx = (GxB_FC32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, for the CMPLX_FP32 operator.
// The alpha/beta scalars are read only when is_eWiseUnion is true; the body
// of the computation lives in GB_add_template.c, driven by the GB_* macros
// defined above. Workspace declared via GB_WERK_DECLARE is released by
// GB_FREE_WORKSPACE before returning.
GrB_Info GB (_AaddB__cmplx_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
// alpha/beta are meaningful only for eWiseUnion; left uninitialized (and
// unused by the template) otherwise
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is sparse or
// hypersparse. All work is done by GB_emult_08_meta.c using the GB_* macros
// defined above.
GrB_Info GB (_AemultB_08__cmplx_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// GB_BINOP_FLIP is 1 for this operator (GxB_CMPLXF(x,y) is not commutative),
// so the flipxy flag selects between the fmult(x,y) and fmult(y,x) variants
// of the template at compile time via GB_FLIPPED.
GrB_Info GB (_AemultB_02__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full. The work is done by GB_emult_04_template.c, sliced by the
// mask (M_ek_slicing).
GrB_Info GB (_AemultB_04__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B, C<M> = A.*B, or C<!M> = A.*B where C is bitmap.
// All work is done by GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__cmplx_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = GxB_CMPLXF (x, Bx [p]): apply the binary operator with the
// scalar x bound as the first operand, for every entry present in B.
// Bb is B->b when B is bitmap (GBB tests entry presence); bnz is the number
// of positions to scan; the loop is parallelized over nthreads threads.
GrB_Info GB (_bind1st__cmplx_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
float *Bx = (float *) Bx_input ;
const float x = (*((float *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < bnz ; k++)
{
// skip positions not present in the bitmap
if (GBB (Bb, k))
{
Cx [k] = GxB_CMPLXF (x, GBX (Bx, k, false)) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = GxB_CMPLXF (Ax [p], y): apply the binary operator with the
// scalar y bound as the second operand, for every entry present in A.
// Ab is A->b when A is bitmap (GBB tests entry presence); anz is the number
// of positions to scan; the loop is parallelized over nthreads threads.
GrB_Info GB (_bind2nd__cmplx_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC32_t *Cx = (GxB_FC32_t *) Cx_output ;
float *Ax = (float *) Ax_input ;
const float y = (*((float *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// skip positions not present in the bitmap
if (GBB (Ab, k))
{
Cx [k] = GxB_CMPLXF (GBX (Ax, k, false), y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = GxB_CMPLXF (x, aij) ; \
}
// C = op (x, A'): transpose A and apply cij = GxB_CMPLXF (x, aij), with the
// scalar x bound as the first operand. GB_ATYPE is temporarily redefined
// because GB_unop_transpose.c reads it as the type of the matrix operand,
// which here is the second input (y) of the binary operator; it is restored
// after the template include.
GrB_Info GB (_bind1st_tran__cmplx_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of this file
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = GxB_CMPLXF (aij, y) ; \
}
// C = op (A', y): transpose A and apply cij = GxB_CMPLXF (aij, y), with the
// scalar y bound as the second operand. The transpose work is done by
// GB_unop_transpose.c using the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__cmplx_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
skel-w2e.c | #include <stdio.h>
#include <stdlib.h>
#include "ofmo-parallel.h"
#include "ofmo-def.h"
#include "skel-w2e.h"
#ifdef USE_CUDA
#include "cuda/cuda-integ.h"
#include "cuda/cudalib.h"
#endif
static double w2e[] = {
-1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0,
-1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0,
-1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0, -1.0e0,
-1.0e0, -1.0e0, -1.0e0,
-1.0e0, // buffered
};
#ifdef _OPENMP
#pragma omp threadprivate(w2e)
#endif
static char *s2e[] = {
"ssss", "psss", "psps", "ppss", "ppps", "pppp",
"dsss", "dsps", "dspp", "dsds", "dpss", "dpps",
"dppp", "dpds", "dpdp", "ddss", "ddps", "ddpp",
"ddds", "dddp", "dddd",
"buff"
};
static MPI_Comm comm = MPI_COMM_WORLD;
static int sync = false;
// Configure the w2e timers: record the MPI communicator to use for barriers
// and whether start/set should synchronize (barrier) before sampling time.
void setup_w2e(const MPI_Comm wcomm, const int optsync)
{
comm = wcomm;
sync = optsync;
}
// Synchronize all OpenMP threads, then (master thread only) all MPI ranks,
// then all OpenMP threads again so no thread proceeds before the MPI
// barrier completes.
static void w2e_Barrier(void)
{
#pragma omp barrier
#ifdef USE_MPI
#pragma omp master
{
MPI_Barrier(comm);
}
#pragma omp barrier
#endif
}
static double t0=0.0;
// Begin a timing interval: optionally synchronize first (see setup_w2e),
// then sample the start time into t0.
// NOTE(review): t0 is a plain file-scope static, while w2e is declared
// threadprivate -- confirm whether t0 is also meant to be per-thread when
// these timers are used inside a parallel region.
void start_w2e(void)
{
if (sync) w2e_Barrier();
t0 = MPI_Wtime();
}
// End a timing interval started by start_w2e() and add the elapsed time to
// the w2e[Labcd] accumulator.
//
// Labcd < 0 selects the trailing "buffered" slot. The end time t1 is
// sampled *before* the optional barrier so that synchronization overhead is
// not charged to the measured interval. A slot still holding its -1
// sentinel (never written) is reset to 0 before accumulating.
//
// Robustness fix: an out-of-range non-negative Labcd previously indexed
// past the end of w2e[]; such calls are now ignored.
void set_w2e(int Labcd)
{
    const int n = (int)(sizeof(w2e)/sizeof(w2e[0]));
    double t1 = MPI_Wtime();
    if (sync) w2e_Barrier();
    if (Labcd < 0) { Labcd = n - 1; }   /* negative key -> "buffered" slot */
    if (Labcd >= n) { return; }         /* guard against out-of-range keys */
    if (w2e[Labcd] < 0) { w2e[Labcd] = 0.0; }
    w2e[Labcd] += t1 - t0;
}
// Print the accumulated w2e timings, one line per integral class (labels
// from s2e[]), from the master thread only. Under USE_CUDA each line also
// reports the launch dimensions taken from dim2e (declared elsewhere --
// presumably in cuda-integ.h; verify).
void print_w2e(void)
{
int i;
int n = sizeof(w2e)/sizeof(double);
#pragma omp master
{
printf("--- w2e ---\n");
for (i=0; i<n; i++) {
// if (w2e[i]>=0) {
#ifndef USE_CUDA
printf("%4s: %8.4f\n",s2e[i],w2e[i]);
#else
// report block/thread dims only when the class was configured and
// CUDA blocks are in use; otherwise print (0,0)
int nb=0, nt=0;
if (dim2e[i][0]!=0&&cuda_get_numBlocks()!=0) {
nb = dim2e[i][0];
nt = dim2e[i][1];
}
printf("%4s: %8.4f (%3d,%3d)\n",s2e[i],w2e[i],nb,nt);
#endif
// }
}
printf("-----------\n");
}
}
|
GB_unop__acos_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__acos_fp32_fp32)
// op(A') function: GB (_unop_tran__acos_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = acosf (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = acosf (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = acosf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOS || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = acosf (Ax [p]): apply the unary ACOS operator to every entry of
// A. When Ab is non-NULL, A is bitmap and only positions with Ab [p] != 0
// hold entries (A->b has already been copied into C->b by the caller);
// otherwise all anz positions are entries. Parallelized over nthreads.
GrB_Info GB (_unop_apply__acos_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t k ;
if (Ab != NULL)
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
if (Ab [k])
{
Cx [k] = acosf (Ax [k]) ;
}
}
}
else
{
// all positions hold entries
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
Cx [k] = acosf (Ax [k]) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = acos (A'): transpose A and apply the unary operator, via the
// GB_unop_transpose.c template and the GB_CAST_OP macro defined above.
GrB_Info GB (_unop_tran__acos_fp32_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
GB_unaryop__minv_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_fp64_fp64
// op(A') function: GB_tran__minv_fp64_fp64
// C type: double
// A type: double
// cast: double cij = (double) aij
// unaryop: cij = 1./aij
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = 1./x ;
// casting
#define GB_CASTING(z, x) \
double z = (double) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = 1./Ax [p] for all anz entries (no bitmap in this older kernel
// generation). GB_CAST_OP expands to the load/cast/op sequence defined by
// the macros above.
GrB_Info GB_unop__minv_fp64_fp64
(
double *restrict Cx,
const double *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
// Cx [p] = 1./((double) Ax [p])
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): transpose, typecast, and apply the unary operator via the
// GB_unaryop_transpose.c template (phase 2 only).
GrB_Info GB_tran__minv_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
|
kvstore_dist_server.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file mxnet_node.h
* \brief implement mxnet nodes
*/
#ifndef MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#define MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
#include <queue>
#include <string>
#include <mutex>
#include <condition_variable>
#include <memory>
#include <functional>
#include <future>
#include <vector>
#include "ps/ps.h"
#include "mxnet/kvstore.h"
#include "../operator/tensor/elemwise_binary_op-inl.h"
#include "../operator/tensor/init_op.h"
namespace mxnet {
namespace kvstore {
static const int kRowSparsePushPull = 1;
static const int kDefaultPushPull = 0;
static const int kStopServer = -1;
static const int kSyncMode = -2;
/**
* \brief executor runs a function using the thread called \ref Start
*/
class Executor {
public:
/**
* \brief start the executor
*
* Runs the event loop on the calling thread: pops queued blocks and
* executes them until a block with an empty function (pushed by Stop())
* is seen. The promise of each block is fulfilled after its function
* runs, which unblocks the corresponding Exec() caller.
*/
void Start() {
std::unique_lock<std::mutex> lk(mu_);
while (true) {
cond_.wait(lk, [this]{return !queue_.empty();});
Block blk = std::move(queue_.front());
queue_.pop();
// release the lock while running user code so Exec() can keep queueing
lk.unlock();
if (blk.f) {
blk.f(); blk.p->set_value();
} else {
// empty function is the sentinel pushed by Stop(): signal and exit
blk.p->set_value(); break;
}
lk.lock();
}
}
/**
* \brief function
*/
typedef std::function<void()> Func;
/**
* \brief let the thread called \ref Start to exec a function. threadsafe
*
* Blocks the caller until the function has finished running on the
* Start() thread (via the block's promise/future pair).
*/
void Exec(const Func& func) {
Block blk(func);
auto fut = blk.p->get_future();
{
std::lock_guard<std::mutex> lk(mu_);
queue_.push(std::move(blk));
cond_.notify_one();
}
fut.wait();
}
/**
* \brief stop the thread, threadsafe
*
* Enqueues an empty function, which Start() interprets as "exit the loop".
*/
void Stop() {
Exec(Func());
}
private:
// A queued unit of work: the function to run and the promise fulfilled
// once it has run (or once the sentinel is consumed).
struct Block {
explicit Block(const Func& func) : f(func), p(std::make_shared<std::promise<void>>()) { }
Func f;
std::shared_ptr<std::promise<void>> p;
};
std::queue<Block> queue_;
std::mutex mu_;
std::condition_variable cond_;
};
class KVStoreDistServer {
public:
// Construct the server: create the underlying ps-lite KVServer and register
// CommandHandle for control (SimpleApp) requests and DataHandleEx for
// key/value push/pull requests. Starts in async mode; sync is enabled later
// by the kSyncMode command.
KVStoreDistServer() {
using namespace std::placeholders;
ps_server_ = new ps::KVServer<float>(0);
static_cast<ps::SimpleApp*>(ps_server_)->set_request_handle(
std::bind(&KVStoreDistServer::CommandHandle, this, _1, _2));
ps_server_->set_request_handle(
std::bind(&KVStoreDistServer::DataHandleEx, this, _1, _2, _3));
sync_mode_ = false;
log_verbose_ = dmlc::GetEnv("MXNET_KVSTORE_DIST_ROW_SPARSE_VERBOSE", false);
}
// Release the ps-lite server created in the constructor.
~KVStoreDistServer() {
delete ps_server_;
}
// Install the controller callback invoked by CommandHandle for
// application-defined commands; must be non-empty.
void set_controller(const KVStore::Controller& controller) {
CHECK(controller);
controller_ = controller;
}
// Install the updater callback used by ApplyUpdates to merge pushed
// gradients into the stored weights; must be non-empty.
void set_updater(const KVStore::Updater& updater) {
CHECK(updater);
updater_ = updater;
}
/**
* \brief blocked until received the command \a kSyncMode
*/
// Run the executor loop on the calling thread; returns only after a
// kStopServer command stops the executor.
void Run() {
exec_.Start();
}
private:
struct MergeBuf {
std::vector<ps::KVMeta> request;
NDArray array;
};
// Handle a control message: kStopServer stops the executor, kSyncMode
// switches the server to synchronous updates, and any other head value is
// forwarded to the user-supplied controller. Always responds to the sender.
void CommandHandle(const ps::SimpleData& recved, ps::SimpleApp* app) {
if (recved.head == kStopServer) {
exec_.Stop();
} else if (recved.head == kSyncMode) {
sync_mode_ = true;
} else {
// let the main thread to execute ctrl, which is necessary for python
exec_.Exec([this, recved]() {
CHECK(controller_);
controller_(recved.head, recved.body);
});
}
app->Response(recved);
}
// Entry point for all key/value requests: dispatch to the row-sparse
// handler or the default (dense) handler based on the request command.
void DataHandleEx(const ps::KVMeta& req_meta,
const ps::KVPairs<real_t>& req_data,
ps::KVServer<real_t>* server) {
const bool is_row_sparse = (req_meta.cmd == kRowSparsePushPull);
if (is_row_sparse) {
DataHandleRowSparse(req_meta, req_data, server);
} else {
DataHandleDefault(req_meta, req_data, server);
}
}
// If every worker has contributed to the merge buffer, apply the merged
// update to the stored array (via updater_ on the executor thread, or a
// plain copy when no updater is set), respond to all pending requests, and
// clear the buffer. Otherwise just wait for the partial merge to finish.
inline void ApplyUpdates(const int key, MergeBuf *merged, NDArray *stored,
ps::KVServer<real_t>* server) {
if (merged->request.size() == (size_t) ps::NumWorkers()) {
// let the main thread to execute updater_, which is necessary for python
if (updater_) {
exec_.Exec([this, key, merged, stored](){
CHECK(updater_);
updater_(key, merged->array, stored);
});
} else {
// if no updater, just copy
CopyFromTo(merged->array, stored);
}
if (log_verbose_) {
LOG(INFO) << "sync response to " << merged->request.size() << " workers";
}
for (const auto& req : merged->request) {
server->Response(req);
}
merged->request.clear();
// block until the engine has finished writing the stored array
stored->WaitToRead();
} else {
merged->array.WaitToRead();
}
}
// Decode the row ids of a row-sparse request into `indices`.
// keys[0] carries the master key; keys[1..num_rows] carry the encoded row
// keys, and each row id is the decoded key offset from master_key.
// `indices` must have room for num_rows elements.
//
// Fix: the previous unconditional `indices[0] = 0` was dead (the loop's
// first iteration overwrites it) and wrote out of bounds when num_rows == 0.
void DecodeRowIds(const ps::SArray<ps::Key> &keys, int64_t *indices,
const int64_t master_key, const int64_t num_rows) {
for (int64_t i = 1; i <= num_rows; i++) {
const int key = DecodeKey(keys[i]);
indices[i - 1] = key - master_key;
}
}
// Serves push/pull requests for a row_sparse parameter.
//
// Wire format (as produced by the worker-side kvstore — verify against the
// sender): keys[0] is the "master key" identifying the parameter; each of
// keys[1..num_rows] encodes master_key + row_id; lens[0] == 0 and lens[i>0]
// gives the per-row value length in elements.
void DataHandleRowSparse(const ps::KVMeta& req_meta,
const ps::KVPairs<real_t>& req_data,
ps::KVServer<real_t>* server) {
int master_key = DecodeKey(req_data.keys[0]);
// keys[0] is the master key, so the remaining entries are the row keys
auto num_rows = req_data.keys.size() - 1;
auto& stored = store_[master_key];
if (req_meta.push) {
CHECK_GT(req_data.lens.size(), 0) << "req_data.lens cannot be empty";
CHECK_EQ(req_data.lens[0], 0);
real_t* data = req_data.vals.data();
if (stored.is_none()) {
if (log_verbose_) LOG(INFO) << "initial push: " << master_key;
// initialization
CHECK_GT(num_rows, 0) << "init with empty data is not supported";
auto unit_len = req_data.lens[1];
CHECK_GT(unit_len, 0);
size_t ds[] = {num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
CHECK_EQ(req_data.vals.size(), num_rows * unit_len);
// recv_blob/recved alias req_data's buffer; the WaitToRead below keeps
// that buffer in use only while this handler is still on the stack
TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*)
NDArray recved = NDArray(recv_blob, 0);
// the very first push defines the dense shape of the stored value
stored = NDArray(kRowSparseStorage, dshape, Context());
Engine::Get()->PushSync([recved, stored](RunContext ctx) {
NDArray rsp = stored;
stored.CheckAndAlloc({mshadow::Shape1(recved.shape()[0])});
mshadow::Stream<cpu> *s = ctx.get_stream<cpu>();
// mark every row as present (indices 0..nrows-1)
op::PopulateFullIdxRspImpl(s, &rsp);
mshadow::Copy(rsp.data().FlatTo1D<cpu, float>(),
recved.data().FlatTo1D<cpu, float>(), s);
}, recved.ctx(), {recved.var()}, {stored.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
// block until the engine op finished: it reads req_data's memory,
// which is only valid during this call
stored.WaitToRead();
server->Response(req_meta);
return;
}
// synced push
if (sync_mode_) {
if (log_verbose_) LOG(INFO) << "sync push: " << master_key << " " << req_data.keys;
auto& merged = merge_buf_[master_key];
if (merged.array.is_none()) {
merged.array = NDArray(kRowSparseStorage, stored.shape(), Context());
}
if (num_rows == 0) {
// reset to zeros
if (merged.request.size() == 0) {
// a fresh (empty) row_sparse array represents an all-zero gradient
merged.array = NDArray(kRowSparseStorage, stored.shape(), Context());
} else {
// nothing to aggregate
}
merged.request.push_back(req_meta);
ApplyUpdates(master_key, &merged, &stored, server);
return;
}
auto unit_len = req_data.lens[1];
CHECK_GT(unit_len, 0);
// indices
std::vector<int64_t> indices(num_rows);
DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
// data
TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*)
// row_sparse NDArray
NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
if (merged.request.size() == 0) {
// first push of this round: overwrite the merge buffer
CopyFromTo(recved, &merged.array, 0);
} else {
// later pushes: merged.array += recved via a temporary `out`
NDArray out(kRowSparseStorage, stored.shape(), Context());
std::vector<Engine::VarHandle> const_vars;
const_vars.push_back(recved.var());
const_vars.push_back(merged.array.var());
// accumulate row_sparse gradients
// TODO(haibin) override + operator for row_sparse NDArray
// instead of calling BinaryComputeRspRsp directly
using namespace mshadow;
Engine::Get()->PushSync([recved, merged, out](RunContext ctx) {
std::vector<NDArray> inputs, outputs;
inputs.push_back(recved);
inputs.push_back(merged.array);
outputs.push_back(out);
op::ElemwiseBinaryOp::ComputeEx<cpu, mshadow::op::plus>(
{}, {}, inputs, {kWriteTo}, outputs);
}, recved.ctx(), const_vars, {out.var()},
FnProperty::kNormal, 0, PROFILER_MESSAGE_FUNCNAME);
CopyFromTo(out, &merged.array, 0);
}
merged.request.push_back(req_meta);
// responds to all buffered requests once the round is complete
ApplyUpdates(master_key, &merged, &stored, server);
} else {
// async push
if (log_verbose_) LOG(INFO) << "async push: " << master_key;
if (num_rows == 0) {
server->Response(req_meta);
return;
}
auto unit_len = req_data.lens[1];
CHECK_GT(unit_len, 0);
// indices
std::vector<int64_t> indices(num_rows);
DecodeRowIds(req_data.keys, indices.data(), master_key, num_rows);
TBlob idx_blob(indices.data(), mshadow::Shape1(num_rows), cpu::kDevMask);
size_t ds[] = {(size_t) num_rows, (size_t) unit_len};
TShape dshape(ds, ds + 2);
TBlob recv_blob(data, dshape, cpu::kDevMask); // NOLINT(*)
NDArray recved(kRowSparseStorage, stored.shape(), recv_blob, {idx_blob}, 0);
// NOTE(review): recved/stored are captured by reference, so this is only
// safe if exec_.Exec runs the closure before returning — confirm the
// Executor semantics before touching this.
exec_.Exec([this, master_key, &recved, &stored](){
CHECK(updater_);
updater_(master_key, recved, &stored);
});
server->Response(req_meta);
// the updater reads req_data's buffer through recved; wait for it
stored.WaitToRead();
}
} else {
// pull
if (log_verbose_) LOG(INFO) << "pull: " << master_key;
ps::KVPairs<real_t> response;
if (num_rows == 0) {
// no rows requested: echo the keys back with zero-length values
std::vector<int> lens(req_data.keys.size(), 0);
response.keys = req_data.keys;
response.lens.CopyFrom(lens.begin(), lens.end());
server->Response(req_meta, response);
return;
}
CHECK(!stored.is_none()) << "init " << master_key << " first";
auto shape = stored.shape();
auto unit_len = shape.ProdShape(1, shape.ndim());
const float* data = stored.data().dptr<float>();
auto len = unit_len * num_rows;
// concat values
response.vals.resize(len);
// NOTE(review): unsigned loop index requires OpenMP 3.0+ — confirm the
// minimum supported toolchain
#pragma omp parallel for
for (size_t i = 1; i <= num_rows; i++) {
int key = DecodeKey(req_data.keys[i]);
int64_t row_id = key - master_key;
const auto src = data + row_id * unit_len;
auto begin = (i - 1) * unit_len;
auto end = i * unit_len;
response.vals.segment(begin, end).CopyFrom(src, unit_len);
}
// setup response
response.keys = req_data.keys;
std::vector<int> lens(req_data.keys.size(), unit_len);
lens[0] = 0;
response.lens.CopyFrom(lens.begin(), lens.end());
server->Response(req_meta, response);
}
}
// Serves push/pull requests for a dense parameter: exactly one key per
// request; lens[0] gives the flat value length on a push.
void DataHandleDefault(const ps::KVMeta& req_meta,
const ps::KVPairs<real_t> &req_data,
ps::KVServer<real_t>* server) {
CHECK_EQ(req_meta.cmd, kDefaultPushPull);
// do some check
CHECK_EQ(req_data.keys.size(), (size_t)1);
if (req_meta.push) {
CHECK_EQ(req_data.lens.size(), (size_t)1);
CHECK_EQ(req_data.vals.size(), (size_t)req_data.lens[0]);
}
int key = DecodeKey(req_data.keys[0]);
auto& stored = store_[key];
// there used several WaitToRead, this is because \a recved's memory
// could be deallocated when this function returns. so we need to make sure
// the operators with \a NDArray are actually finished
if (req_meta.push) {
size_t ds[] = {(size_t)req_data.lens[0]};
TShape dshape(ds, ds + 1);
// recved is a zero-copy view over the request buffer (see note above)
TBlob recv_blob((real_t*)req_data.vals.data(), // NOLINT(*)
dshape, cpu::kDevMask);
NDArray recved = NDArray(recv_blob, 0);
if (stored.is_none()) {
// initialization
stored = NDArray(dshape, Context());
CopyFromTo(recved, &stored, 0);
server->Response(req_meta);
stored.WaitToRead();
} else if (sync_mode_) {
// synced push
// accumulate into the merge buffer; ApplyUpdates responds to the
// buffered requests once the aggregation round is complete
auto& merged = merge_buf_[key];
if (merged.array.is_none()) {
merged.array = NDArray(dshape, Context());
}
if (merged.request.size() == 0) {
CopyFromTo(recved, &merged.array, 0);
} else {
merged.array += recved;
}
merged.request.push_back(req_meta);
ApplyUpdates(key, &merged, &stored, server);
} else {
// async push
// apply the user-defined updater immediately, without aggregation
exec_.Exec([this, key, &recved, &stored](){
CHECK(updater_);
updater_(key, recved, &stored);
});
server->Response(req_meta);
stored.WaitToRead();
}
} else {
// pull
ps::KVPairs<real_t> response;
CHECK(!stored.is_none()) << "init " << key << " first";
auto len = stored.shape().Size();
response.keys = req_data.keys;
response.lens = {len};
// TODO(mli) try to remove this CopyFrom
response.vals.CopyFrom(static_cast<const float*>(stored.data().dptr_), len);
server->Response(req_meta, response);
}
}
// Translate a global ps-lite key into a server-local key by subtracting
// the start of this server's assigned key range.
int DecodeKey(ps::Key key) {
const auto& range = ps::Postoffice::Get()->GetServerKeyRanges()[ps::MyRank()];
return key - range.begin();
}
/**
* \brief user defined: configuration and callbacks installed from outside.
*/
// true: aggregate pushes from all workers in merge_buf_ and apply one
// update per round; false: apply every push immediately (see the
// DataHandle* methods above)
bool sync_mode_;
// user-supplied control-message callback
KVStore::Controller controller_;
// user-supplied optimizer: updater_(key, pushed_grad, &stored_weight)
KVStore::Updater updater_;
// server-local key -> stored parameter value
std::unordered_map<int, NDArray> store_;
// per-key aggregation buffer plus the requests awaiting a response
std::unordered_map<int, MergeBuf> merge_buf_;
// runs user callbacks (updater_) via exec_.Exec
Executor exec_;
// underlying ps-lite server handle
// NOTE(review): templated on float while the handlers use real_t —
// presumably real_t == float; verify the typedef.
ps::KVServer<float>* ps_server_;
// whether to LOG verbose information
bool log_verbose_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_KVSTORE_DIST_SERVER_H_
|
pfem_2_monolithic_slip_scheme.h | /*
==============================================================================
KratosFluidDynamicsApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi, Janosch Stascheit, Felix Nagel
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
janosch.stascheit@rub.de
nagel@sd.rub.de
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
- Ruhr-University Bochum, Institute for Structural Mechanics, Germany
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
#if !defined(KRATOS_PFEM2_MONOLITHIC_SLIP_SCHEME )
#define KRATOS_PFEM2_MONOLITHIC_SLIP_SCHEME
/* System includes */
/* External includes */
#include "boost/smart_ptr.hpp"
/* Project includes */
#include "includes/define.h"
#include "includes/model_part.h"
#include "solving_strategies/schemes/scheme.h"
#include "includes/variables.h"
#include "includes/deprecated_variables.h"
#include "containers/array_1d.h"
#include "utilities/openmp_utils.h"
#include "utilities/coordinate_transformation_utilities.h"
#include "processes/process.h"
namespace Kratos {
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/// Monolithic solution scheme for the PFEM-2 fluid solver with SLIP
/// boundary support.
///
/// Time integration itself is delegated to the elements (plain
/// CalculateLocalSystem / CalculateRightHandSide calls). On top of the base
/// Scheme this class adds one responsibility: for nodes flagged SLIP, local
/// element/condition contributions are rotated into a normal-tangential
/// frame (CoordinateTransformationUtils) so the slip condition can be
/// imposed, and nodal velocities are rotated/recovered around the DOF
/// update.
template<class TSparseSpace,
class TDenseSpace //= DenseSpace<double>
>
class PFEM2MonolithicSlipScheme : public Scheme<TSparseSpace, TDenseSpace> {
public:
/**@name Type Definitions */
/*@{ */
//typedef boost::shared_ptr< ResidualBasedPredictorCorrectorBossakScheme<TSparseSpace,TDenseSpace> > Pointer;
KRATOS_CLASS_POINTER_DEFINITION(PFEM2MonolithicSlipScheme);
typedef Scheme<TSparseSpace, TDenseSpace> BaseType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename Element::DofsVectorType DofsVectorType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef Element::GeometryType GeometryType;
/*@} */
/**@name Life Cycle
*/
/*@{ */
/** Constructor without a turbulence model
* @param DomainSize spatial dimension (2 or 3). The rotation tool is
* configured with a nodal block size of DomainSize+1 — presumably
* DomainSize velocity DOFs plus one pressure DOF; confirm against the
* element formulation. Only nodes carrying the SLIP flag are rotated.
*/
PFEM2MonolithicSlipScheme(unsigned int DomainSize)
:
Scheme<TSparseSpace, TDenseSpace>(),
mRotationTool(DomainSize,DomainSize+1,SLIP)
{
}
/** Destructor.
*/
virtual ~PFEM2MonolithicSlipScheme() {}
/*@} */
/**@name Operators
*/
/*@{ */
/**
Performing the update of the solution.
*/
//***************************************************************************
// Rotates nodal velocities of SLIP nodes into the normal-tangential frame,
// applies the solution increment Dv to the free DOFs, then rotates the
// updated velocities back to Cartesian axes.
virtual void Update(ModelPart& r_model_part,
DofsArrayType& rDofSet,
TSystemMatrixType& A,
TSystemVectorType& Dv,
TSystemVectorType& b) override
{
KRATOS_TRY;
mRotationTool.RotateVelocities(r_model_part);
BasicUpdateOperations(r_model_part, rDofSet, A, Dv, b);
mRotationTool.RecoverVelocities(r_model_part);
KRATOS_CATCH("")
}
//***************************************************************************
// Plain incremental DOF update (x += Dv for every free DOF), parallelized
// by statically partitioning the DOF set across OpenMP threads.
virtual void BasicUpdateOperations(ModelPart& rModelPart,
DofsArrayType& rDofSet,
TSystemMatrixType& A,
TSystemVectorType& Dv,
TSystemVectorType& b)
{
KRATOS_TRY
int NumThreads = OpenMPUtils::GetNumThreads();
OpenMPUtils::PartitionVector DofSetPartition;
OpenMPUtils::DivideInPartitions(rDofSet.size(), NumThreads, DofSetPartition);
//update of velocity (by DOF)
#pragma omp parallel
{
int k = OpenMPUtils::ThisThread();
typename DofsArrayType::iterator DofSetBegin = rDofSet.begin() + DofSetPartition[k];
typename DofsArrayType::iterator DofSetEnd = rDofSet.begin() + DofSetPartition[k + 1];
for (typename DofsArrayType::iterator itDof = DofSetBegin; itDof != DofSetEnd; itDof++) {
if (itDof->IsFree()) {
itDof->GetSolutionStepValue() += TSparseSpace::GetValue(Dv, itDof->EquationId());
}
}
}
KRATOS_CATCH("")
}
//***************************************************************************
/** this function is designed to be called in the builder and solver
to introduce
the selected time integration scheme. It "asks" the matrix needed to
the element and
performs the operations needed to introduce the seected time
integration scheme.
this function calculates at the same time the contribution to the
LHS and to the RHS
of the system
*/
// Element LHS+RHS: delegate to the element, then rotate the local system
// and impose the slip condition on SLIP nodes of its geometry.
void CalculateSystemContributions(Element& rCurrentElement,
LocalSystemMatrixType& LHS_Contribution,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
const ProcessInfo& CurrentProcessInfo) override
{
KRATOS_TRY
//Initializing the non linear iteration for the current element
//KRATOS_WATCH(LHS_Contribution);
//basic operations for the element considered
rCurrentElement.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo);
// If there is a slip condition, apply it on a rotated system of coordinates
mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry());
mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentElement.GetGeometry());
KRATOS_CATCH("")
}
// Element RHS only: same rotation/slip treatment as the full system above.
void CalculateRHSContribution(Element& rCurrentElement,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
const ProcessInfo& CurrentProcessInfo) override
{
//Initializing the non linear iteration for the current element
//basic operations for the element considered
rCurrentElement.CalculateRightHandSide(RHS_Contribution, CurrentProcessInfo);
rCurrentElement.EquationIdVector(EquationId, CurrentProcessInfo);
// If there is a slip condition, apply it on a rotated system of coordinates
mRotationTool.Rotate(RHS_Contribution,rCurrentElement.GetGeometry());
mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentElement.GetGeometry());
}
/** functions totally analogous to the precedent but applied to
the "condition" objects
*/
// Condition LHS+RHS. NOTE: unlike the element version, the rotation/slip
// calls are commented out here — conditions are assembled unrotated.
virtual void CalculateSystemContributions(Condition& rCurrentCondition,
LocalSystemMatrixType& LHS_Contribution,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
const ProcessInfo& CurrentProcessInfo) override
{
KRATOS_TRY
//KRATOS_WATCH("CONDITION LOCALVELOCITYCONTRIBUTION IS NOT DEFINED");
rCurrentCondition.CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
rCurrentCondition.EquationIdVector(EquationId, CurrentProcessInfo);
// Rotate contributions (to match coordinates for slip conditions)
//mRotationTool.Rotate(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());
//mRotationTool.ApplySlipCondition(LHS_Contribution,RHS_Contribution,rCurrentCondition->GetGeometry());
KRATOS_CATCH("")
}
// Condition RHS only: here the rotation/slip treatment IS applied.
// NOTE(review): this is asymmetric with the LHS+RHS condition overload
// above (which leaves the calls commented out) — confirm intentional.
virtual void CalculateRHSContribution(Condition& rCurrentCondition,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId,
const ProcessInfo& rCurrentProcessInfo) override
{
KRATOS_TRY;
//KRATOS_WATCH("CONDITION LOCALVELOCITYCONTRIBUTION IS NOT DEFINED");
//Initializing the non linear iteration for the current condition
//basic operations for the element considered
rCurrentCondition.CalculateRightHandSide(RHS_Contribution,rCurrentProcessInfo);
rCurrentCondition.EquationIdVector(EquationId,rCurrentProcessInfo);
// Rotate contributions (to match coordinates for slip conditions)
mRotationTool.Rotate(RHS_Contribution,rCurrentCondition.GetGeometry());
mRotationTool.ApplySlipCondition(RHS_Contribution,rCurrentCondition.GetGeometry());
KRATOS_CATCH("");
}
//*************************************************************************************
//*************************************************************************************
// Delegates to the base scheme, then guards against a zero DELTA_TIME in
// the ProcessInfo (a common setup mistake that would break the elements).
virtual void InitializeSolutionStep(ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
const ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
Scheme<TSparseSpace, TDenseSpace>::InitializeSolutionStep(r_model_part, A, Dx, b);
double DeltaTime = CurrentProcessInfo[DELTA_TIME];
if (DeltaTime == 0)
KRATOS_THROW_ERROR(std::logic_error, "detected delta_time = 0 ... check if the time step is created correctly for the current model part", "");
}
//*************************************************************************************
//*************************************************************************************
// Intentionally a no-op for this scheme.
virtual void InitializeNonLinIteration(ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b) override
{
KRATOS_TRY
KRATOS_CATCH("")
}
// Intentionally a no-op; the commented-out OSS projection code below is
// kept for reference from the scheme this file was derived from.
virtual void FinalizeNonLinIteration(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override
{
/*
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
//if orthogonal subscales are computed
if (CurrentProcessInfo[OSS_SWITCH] == 1.0) {
if (rModelPart.GetCommunicator().MyPID() == 0)
std::cout << "Computing OSS projections" << std::endl;
for (typename ModelPart::NodesContainerType::iterator ind = rModelPart.NodesBegin(); ind != rModelPart.NodesEnd(); ind++) {
noalias(ind->FastGetSolutionStepValue(ADVPROJ)) = ZeroVector(3);
ind->FastGetSolutionStepValue(DIVPROJ) = 0.0;
ind->FastGetSolutionStepValue(NODAL_AREA) = 0.0;
}//end of loop over nodes
//loop on nodes to compute ADVPROJ CONVPROJ NODALAREA
array_1d<double, 3 > output;
for (typename ModelPart::ElementsContainerType::iterator elem = rModelPart.ElementsBegin(); elem != rModelPart.ElementsEnd(); elem++)
{
elem->Calculate(ADVPROJ, output, CurrentProcessInfo);
}
rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA);
rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ);
rModelPart.GetCommunicator().AssembleCurrentData(ADVPROJ);
// Correction for periodic conditions
this->PeriodicConditionProjectionCorrection(rModelPart);
for (typename ModelPart::NodesContainerType::iterator ind = rModelPart.NodesBegin(); ind != rModelPart.NodesEnd(); ind++)
{
if (ind->FastGetSolutionStepValue(NODAL_AREA) == 0.0)
{
ind->FastGetSolutionStepValue(NODAL_AREA) = 1.0;
//KRATOS_WATCH("*********ATTENTION: NODAL AREA IS ZERRROOOO************");
}
const double Area = ind->FastGetSolutionStepValue(NODAL_AREA);
ind->FastGetSolutionStepValue(ADVPROJ) /= Area;
ind->FastGetSolutionStepValue(DIVPROJ) /= Area;
}
}
*/
}
// Only forwards to the base scheme; the commented-out reaction
// computation below is kept for reference.
void FinalizeSolutionStep(ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override
{
/*
Element::EquationIdVectorType EquationId;
LocalSystemVectorType RHS_Contribution;
LocalSystemMatrixType LHS_Contribution;
ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
for (ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); ++itNode)
{
itNode->FastGetSolutionStepValue(REACTION_X,0) = 0.0;
itNode->FastGetSolutionStepValue(REACTION_Y,0) = 0.0;
itNode->FastGetSolutionStepValue(REACTION_Z,0) = 0.0;
}
for (ModelPart::ElementsContainerType::ptr_iterator itElem = rModelPart.Elements().ptr_begin(); itElem != rModelPart.Elements().ptr_end(); ++itElem)
{
//KRATOS_WATCH(LHS_Contribution);
//basic operations for the element considered
(*itElem)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
(*itElem)->EquationIdVector(EquationId, CurrentProcessInfo);
GeometryType& rGeom = (*itElem)->GetGeometry();
unsigned int NumNodes = rGeom.PointsNumber();
unsigned int Dimension = rGeom.WorkingSpaceDimension();
unsigned int index = 0;
for (unsigned int i = 0; i < NumNodes; i++)
{
rGeom[i].FastGetSolutionStepValue(REACTION_X,0) -= RHS_Contribution[index++];
rGeom[i].FastGetSolutionStepValue(REACTION_Y,0) -= RHS_Contribution[index++];
if (Dimension == 3) rGeom[i].FastGetSolutionStepValue(REACTION_Z,0) -= RHS_Contribution[index++];
index++; // skip pressure dof
}
}
rModelPart.GetCommunicator().AssembleCurrentData(REACTION);
*/
// Base scheme calls FinalizeSolutionStep method of elements and conditions
Scheme<TSparseSpace, TDenseSpace>::FinalizeSolutionStep(rModelPart, A, Dx, b);
}
//************************************************************************************************
//************************************************************************************************
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
/*@} */
/**@name Protected Operators*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
// Rotates local contributions and nodal velocities between Cartesian and
// normal-tangential frames for nodes flagged SLIP.
CoordinateTransformationUtils<LocalSystemMatrixType,LocalSystemVectorType,double> mRotationTool;
/*@} */
/**@name Private Operators*/
/*@{ */
/*@} */
/**@name Private Operations*/
/*@{ */
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class Scheme */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_PFEM2_MONOLITHIC_SLIP_SCHEME defined */
|
GB_binop__bget_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bget_int64
// A.*B function (eWiseMult): GB_AemultB__bget_int64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bget_int64
// C+=b function (dense accum): GB_Cdense_accumb__bget_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_int64
// C=scalar+B GB_bind1st__bget_int64
// C=scalar+B' GB_bind1st_tran__bget_int64
// C=A+scalar GB_bind2nd__bget_int64
// C=A'+scalar GB_bind2nd_tran__bget_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_BITGET (aij, bij, int64_t, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_BITGET (x, y, int64_t, 64) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_INT64 || GxB_NO_BGET_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A and B are all dense. The actual loop lives in the
// included template, specialized by the GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__bget_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix. The *_slice
// arrays describe how B's entries are partitioned across ntasks tasks.
GrB_Info GB_Cdense_accumB__bget_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix. The wrapper only
// unpacks the typed scalar; the loop lives in the included template.
GrB_Info GB_Cdense_accumb__bget_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
// single exit point, matching GB_Cdense_accumB above; the original had a
// second unreachable "return (GrB_SUCCESS) ;" inside the inner block
// (code-generator artifact), now removed
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled variant: no column-scale (C = A*D) kernel is generated for the
// BGET operator, so the generator emits this "(none)" stub, compiled out
// by the surrounding #if 0.
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
// Disabled variant: no row-scale (C = D*B) kernel is generated for the
// BGET operator, so this stub is compiled out by the surrounding #if 0.
// The placeholder name was "(node)", a typo for the generator's "(none)"
// convention (cf. the colscale stub above and the header comment).
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B. The slice pointers are allocated by the
// included template for sliced matrices and released via GB_FREE_ALL.
GrB_Info GB_AaddB__bget_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B. Same slicing/cleanup scheme as
// GB_AaddB above, but over the intersection pattern of A and B.
GrB_Info GB_AemultB__bget_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// operator/type disabled at compile time; caller uses the generic kernel
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitget (x, Bx [p]) for every entry p, with the scalar bound as
// the first operand. Bb is B's bitmap (NULL if B is not bitmap); GBB skips
// positions not present in the bitmap.
GrB_Info GB_bind1st__bget_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!GBB (Bb, p)) continue ;
int64_t bij = Bx [p] ;
Cx [p] = GB_BITGET (x, bij, int64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitget (Ax [p], y) for every entry p, with the scalar bound as
// the second operand. Ab is A's bitmap (NULL if A is not bitmap).
GrB_Info GB_bind2nd__bget_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries absent from the bitmap
if (!GBB (Ab, p)) continue ;
int64_t aij = Ax [p] ;
Cx [p] = GB_BITGET (aij, y, int64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_BITGET (x, aij, int64_t, 64) ; \
}
// C = op (x, A'): transpose A and apply bitget with the scalar bound as
// the first operand; the traversal lives in GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__bget_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_BITGET (aij, y, int64_t, 64) ; \
}
// C = op (A', y): transpose A and apply bitget with the scalar bound as
// the second operand; the traversal lives in GB_unop_transpose.c.
GrB_Info GB_bind2nd_tran__bget_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__minv_uint64_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint64_int32
// op(A') function: GB_tran__minv_uint64_int32
// C type: uint64_t
// A type: int32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 64)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 64) ;
// casting
#define GB_CASTING(z, aij) \
uint64_t z = (uint64_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT64 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__minv_uint64_int32: Cx [p] = minv ((uint64_t) Ax [p]) for all p.
// Casts each int32_t entry to uint64_t and applies the unsigned modular
// inverse GB_IMINV_UNSIGNED with 64-bit width.  The per-entry work is
// expanded from the GB_CAST_OP macro defined above in this file.
GrB_Info GB_unop__minv_uint64_int32
(
uint64_t *Cx, // Cx and Ax may be aliased
int32_t *Ax,
int64_t anz, // number of entries to apply the operator to
int nthreads // number of OpenMP threads to use
)
{
#if GB_DISABLE
// operator disabled at compile time; the caller uses the generic case
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// embarrassingly parallel: each entry is computed independently
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__minv_uint64_int32: C = op (cast (A')): transpose A, typecast
// int32_t to uint64_t, and apply the unsigned modular inverse.  The actual
// transpose loop is in the included template GB_unaryop_transpose.c
// (phase 2 of 2: the numeric phase).
GrB_Info GB_tran__minv_uint64_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice // number of slices of A
)
{
#if GB_DISABLE
// operator disabled at compile time; the caller uses the generic case
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: (none)
// op(A') function: GB_unop_tran__identity_fc32_fc32
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
#if 0
// This apply kernel is intentionally compiled out: the file header lists the
// op(A) function as "(none)" for the identity operator with no typecast
// (fc32 -> fc32) -- presumably because a plain copy suffices; the body is
// kept only for reference.  (TODO confirm against GB_apply's generic path.)
GrB_Info (none)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_unop_tran__identity_fc32_fc32: C = A' with the identity operator and no
// typecast.  The transpose loop is in the included template
// GB_unop_transpose.c (phase 2 of 2: the numeric phase).
GrB_Info GB_unop_tran__identity_fc32_fc32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice // number of slices of A
)
{
#if GB_DISABLE
// operator disabled at compile time; the caller uses the generic case
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
zgelqf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_gelqf
*
* Computes tile LQ factorization of a complex m-by-n matrix A.
* The factorization has the form
* \f[ A = L \times Q \f],
* where L is a lower trapezoidal with positive diagonal and Q is a matrix with
* orthonormal rows.
*
*******************************************************************************
*
* @param[in] m
* The number of rows of the matrix A. m >= 0.
*
* @param[in] n
* The number of columns of the matrix A. n >= 0.
*
* @param[in,out] pA
* On entry, pointer to the m-by-n matrix A.
* On exit, the elements on and below the diagonal of the array
* contain the m-by-min(m,n) lower trapezoidal matrix L (L is lower
* triangular if M <= N); the elements above the diagonal represent
* the unitary matrix Q as a product of elementary reflectors, stored
* by tiles.
*
* @param[in] lda
* The leading dimension of the array A. lda >= max(1,m).
*
* @param[out] T
* On exit, auxiliary factorization data, required by plasma_zgelqs
* to solve the system of equations.
* Matrix of T is allocated inside this function and needs to be
* destroyed by plasma_desc_destroy.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_zgelqf
* @sa plasma_cgelqf
* @sa plasma_dgelqf
* @sa plasma_sgelqf
* @sa plasma_zgelqs
*
******************************************************************************/
int plasma_zgelqf(int m, int n,
                  plasma_complex64_t *pA, int lda,
                  plasma_desc_t *T)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (m < 0) {
        plasma_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (lda < imax(1, m)) {
        plasma_error("illegal value of lda");
        return -4;
    }

    // quick return
    if (imin(m, n) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int ib = plasma->ib;
    int nb = plasma->nb;
    int householder_mode = plasma->householder_mode;

    // Create tile matrix.
    plasma_desc_t A;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        m, n, 0, 0, m, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }

    // Prepare descriptor T.
    retval = plasma_descT_create(A, ib, householder_mode, T);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_descT_create() failed");
        // release the tile matrix created above; the original code leaked it
        plasma_desc_destroy(&A);
        return retval;
    }

    // Allocate workspace.
    plasma_workspace_t work;
    size_t lwork = nb + ib*nb;  // gelqt: tau + work
    retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_workspace_create() failed");
        // unwind everything created so far to avoid leaks on this path
        plasma_desc_destroy(T);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        plasma_workspace_destroy(&work);
        plasma_desc_destroy(T);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        // Call the tile async function.
        plasma_omp_zgelqf(A, *T, work, sequence, &request);
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(A, pA, lda, sequence, &request);
    }
    // implicit synchronization

    plasma_workspace_destroy(&work);

    // Free matrix A in tile layout.
    plasma_desc_destroy(&A);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_gelqf
*
* Computes the tile LQ factorization of a matrix.
* Non-blocking tile version of plasma_zgelqf().
* May return before the computation is finished.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in,out] A
* Descriptor of matrix A.
* A is stored in the tile layout.
*
* @param[out] T
* Descriptor of matrix T.
* On exit, auxiliary factorization data, required by plasma_zgelqs to
* solve the system of equations.
*
* @param[in] work
* Workspace for the auxiliary arrays needed by some coreblas kernels.
* For LQ factorization, contains preallocated space for tau and work
* arrays. Allocated by the plasma_workspace_create function.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes).
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zgelqf
* @sa plasma_omp_cgelqf
* @sa plasma_omp_dgelqf
* @sa plasma_omp_sgelqf
* @sa plasma_omp_zgelqs
*
******************************************************************************/
void plasma_omp_zgelqf(plasma_desc_t A, plasma_desc_t T,
                       plasma_workspace_t work,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Validate sequence and request before anything else: every other error
    // path reports through plasma_request_fail(), which writes through both
    // pointers.  The original code passed a possibly-NULL sequence/request
    // into plasma_request_fail(), dereferencing NULL.
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        return;
    }

    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(T) != PlasmaSuccess) {
        plasma_error("invalid T");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (imin(A.m, A.n) == 0)
        return;

    // Call the parallel function appropriate for the Householder mode.
    if (plasma->householder_mode == PlasmaTreeHouseholder) {
        plasma_pzgelqf_tree(A, T, work, sequence, request);
    }
    else {
        plasma_pzgelqf(A, T, work, sequence, request);
    }
}
|
denseraster.h | #pragma once
#include "gdx/cell.h"
#include "gdx/cpupredicates-private.h"
#include "gdx/exception.h"
#include "gdx/nodatapredicates-private.h"
#include "gdx/rasterchecks.h"
#include "gdx/rasteriterator.h"
#include "gdx/rastermetadata.h"
#include "gdx/simd.h"
#include "infra/cast.h"
#include "infra/span.h"
#include "infra/string.h"
#include "rasterutils-private.h"
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4244 4242 4127 4005)
#endif
#include <Vc/Allocator>
#include <Vc/common/simdize.h>
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#include <algorithm>
#include <cassert>
#include <type_traits>
#include <vector>
namespace gdx {
template <typename T>
class DenseRaster
{
public:
using value_type = T;
using size_type = std::size_t;
using data_type = std::vector<T, Vc::Allocator<T>>;
using nodata_type = std::optional<value_type>;
using pointer = T*;
using const_pointer = const T*;
using iterator = pointer;
using const_iterator = const_pointer;
static constexpr bool raster_type_has_nan = std::numeric_limits<T>::has_quiet_NaN;
static constexpr bool with_nodata = true;
static constexpr T NaN = std::numeric_limits<T>::quiet_NaN();
// True when the value type has a quiet NaN (floating point rasters); NaN is
// then used as the in-memory nodata marker (see is_nodata / mark_as_nodata).
static constexpr bool has_nan()
{
return raster_type_has_nan;
}
// SIMD paths are used for all value types except 8-bit and 64-bit integers
// -- presumably those are not supported by the Vc backend (TODO confirm).
static constexpr bool simd_supported()
{
return !(std::is_same_v<uint8_t, T> || std::is_same_v<int64_t, T> || std::is_same_v<uint64_t, T>);
}
// Construct an empty 0x0 raster.
DenseRaster() = default;
// Construct a rows x cols raster; cell values are value-initialized.
// NOTE(review): rows * cols multiplies two int32 values -- overflows for
// rasters with more than 2^31-1 cells (see also resize()/index()).
DenseRaster(int32_t rows, int32_t cols)
: _meta(rows, cols)
, _data(rows * cols)
{
}
// Construct from metadata; for floating point rasters, cells equal to the
// metadata nodata value are normalized to NaN (init_nodata_values).
explicit DenseRaster(RasterMetadata meta)
: _meta(std::move(meta))
, _data(_meta.rows * _meta.cols)
{
init_nodata_values();
}
DenseRaster(int32_t rows, int32_t cols, T fillValue)
: DenseRaster(RasterMetadata(rows, cols), fillValue)
{
}
// Construct with every cell set to fillValue.
DenseRaster(const RasterMetadata& meta, T fillValue)
: _meta(meta)
, _data(meta.rows * meta.cols)
{
if constexpr (raster_type_has_nan) {
// make sure we fill the raster with NaNs if the fill value is the nodata value
if (_meta.nodata.has_value() && fillValue == static_cast<T>(*_meta.nodata)) {
fillValue = NaN;
}
}
fill(fillValue);
}
DenseRaster(int32_t rows, int32_t cols, std::span<const T> data)
: DenseRaster(RasterMetadata(rows, cols), data)
{
}
// Construct from metadata and a copy of the provided cell data; throws when
// the data size does not match rows x cols.
DenseRaster(const RasterMetadata& meta, std::span<const T> data)
: _meta(meta)
, _data(meta.rows * meta.cols)
{
throw_on_datasize_mismatch(meta.rows, meta.cols, data.size());
std::copy(data.begin(), data.end(), _data.data());
init_nodata_values();
}
// Construct by taking ownership of an existing data buffer.
// Fix: the original initialized `_data(data)`, but a named rvalue reference
// is an lvalue, so the vector was copied instead of moved -- defeating the
// purpose of the data_type&& signature.  std::move makes it an actual move.
DenseRaster(const RasterMetadata& meta, data_type&& data)
: _meta(meta)
, _data(std::move(data))
{
    if (inf::truncate<int32_t>(_data.size()) != meta.rows * meta.cols) {
        throw InvalidArgument("Invalid data size provided");
    }
    init_nodata_values();
}
// Movable but not copyable; use copy() for an explicit deep copy.
DenseRaster(DenseRaster<T>&&) noexcept = default;
DenseRaster(const DenseRaster<T>& other) = delete;
// NOTE(review): move assignment is not marked noexcept, unlike the move
// constructor -- confirm whether that asymmetry is intentional.
DenseRaster& operator=(DenseRaster<T>&&) = default;
DenseRaster& operator=(const DenseRaster<T>& other) = delete;
// Resize the raster and fill every cell (including former nodata) with value.
void resize_and_fill(int32_t rows, int32_t cols, value_type value)
{
resize(rows, cols);
fill(value);
}
// Resize the raster to rows x cols.  Cell contents after a size change are
// whatever std::vector::resize leaves (retained prefix + value-initialized
// tail); the nodata setting is not touched by this overload.
void resize(int32_t rows, int32_t cols)
{
    _meta.rows = rows;
    _meta.cols = cols;
    // multiply in std::size_t: the original int32 * int32 product overflows
    // (UB) for rasters with more than 2^31-1 cells
    _data.resize(static_cast<std::size_t>(rows) * static_cast<std::size_t>(cols));
}
// Resize and set the nodata value in a single call.
void resize(int32_t rows, int32_t cols, std::optional<double> nodata)
{
    _meta.rows = rows;
    _meta.cols = cols;
    _meta.nodata = nodata;
    _data.resize(static_cast<std::size_t>(rows) * static_cast<std::size_t>(cols));
}
// Replace the metadata; the new metadata must describe exactly the number of
// cells currently allocated.
void set_metadata(RasterMetadata meta)
{
if (meta.rows * meta.cols != ssize()) {
throw InvalidArgument("Cannot change metadata: invalid size");
}
_meta = std::move(meta);
}
// Explicit deep copy (the copy constructor is deleted to avoid accidental
// copies of large buffers).
DenseRaster<T> copy() const
{
DenseRaster<T> dst(_meta);
dst._data = _data;
return dst;
}
// Iteration covers all cells in row-major order, nodata included.
auto begin()
{
return _data.begin();
}
auto begin() const
{
return cbegin();
}
auto cbegin() const
{
return _data.cbegin();
}
auto end()
{
return _data.end();
}
auto end() const
{
return cend();
}
auto cend() const
{
return _data.cend();
}
// Raw pointer to the contiguous row-major cell storage.
const value_type* data() const noexcept
{
return _data.data();
}
value_type* data() noexcept
{
return _data.data();
}
// True when the raster actually contains at least one nodata cell (O(n)
// scan); always false when no nodata value is configured.
bool has_nodata() const noexcept
{
if (_meta.nodata.has_value()) {
if constexpr (raster_type_has_nan) {
return std::any_of(begin(), end(), [](T value) { return std::isnan(value); });
} else {
return std::any_of(begin(), end(), [nod = static_cast<T>(*_meta.nodata)](T value) { return value == nod; });
}
}
return false;
}
// The configured nodata value cast to the raster value type, if any.
std::optional<T> nodata() const noexcept
{
return inf::optional_cast<T>(_meta.nodata);
}
// Total number of cells (rows x cols).
std::size_t size() const noexcept
{
return _data.size();
}
// Signed cell count, convenient for comparisons against int expressions.
std::ptrdiff_t ssize() const noexcept
{
assert(_data.size() <= std::size_t(std::numeric_limits<std::ptrdiff_t>::max()));
return static_cast<std::ptrdiff_t>(_data.size());
}
// True when the raster contains no cells.
bool empty() const noexcept
{
    return _data.empty();  // idiomatic: prefer empty() over size() == 0
}
// Rewrite NaN cells back to the configured nodata value.  Floating point
// rasters store nodata as NaN internally (see init_nodata_values); call this
// before handing the buffer to code that expects the metadata nodata value.
void collapse_data()
{
// no collapse needed for non floating point types
if constexpr (has_nan()) {
if (auto nod = nodata(); nod.has_value() && !std::isnan(*nod)) {
if constexpr (simd_supported()) {
// Vc write-masked assignment: only NaN lanes receive the nodata value
simd::for_each(begin(), end(), [nodata = *nod](auto& value) {
value(std::isnan(value)) = nodata;
});
} else {
std::transform(begin(), end(), begin(), [nodata = *nod](T value) {
return std::isnan(value) ? nodata : value;
});
}
}
}
}
const RasterMetadata& metadata() const noexcept
{
return _meta;
}
void set_projection(int32_t epsg)
{
_meta.set_projection_from_epsg(epsg);
}
void clear_projection()
{
_meta.projection.clear();
}
// Set the nodata marker value.  NaN is rejected for integral rasters since
// it cannot be represented in the cell data.
void set_nodata(double newValue)
{
if constexpr (!raster_type_has_nan) {
if (std::isnan(newValue)) {
throw InvalidArgument("Nodata value cannot be NaN for integral rasters");
}
}
_meta.nodata = newValue;
}
// Overwrite every nodata cell with newValue, then drop the nodata marker
// from the metadata: afterwards the raster has no nodata concept at all.
void replace_nodata(T newValue)
{
    const std::size_t cellCount = _data.size();
    for (std::size_t cell = 0; cell < cellCount; ++cell) {
        if (is_nodata(cell)) {
            _data[cell] = newValue;
        }
    }
    _meta.nodata.reset();
}
// Mark every cell currently holding the given value as nodata
// (mark_as_nodata throws when no nodata value is configured).
void turn_value_into_nodata(T value)
{
    const std::size_t cellCount = _data.size();
    for (std::size_t cell = 0; cell < cellCount; ++cell) {
        if (_data[cell] == value) {
            mark_as_nodata(cell);
        }
    }
}
// assigns the value to all the elements of the raster, even nodata
void fill(value_type value)
{
std::fill(_data.begin(), _data.end(), value);
}
// assigns the value to all the elements of the raster, leaving nodata values intact
void fill_values(value_type value)
{
if (auto nod = nodata(); nod.has_value()) {
if constexpr (simd_supported()) {
// Vc write-masked assignment: only lanes differing from nodata get value.
// NOTE(review): this compares against the raw nodata value, but for
// floating point rasters nodata cells are stored as NaN
// (init_nodata_values) and NaN != nod is true -- NaN cells would be
// overwritten here, unlike in the scalar branch below.  Confirm.
simd::for_each(_data.begin(), _data.end(), [value, nod = *nod](auto& v) {
v(v != nod) = value;
});
} else {
std::for_each(begin(), end(), [=](auto& v) {
if (!is_nodata_value(v)) {
v = value;
}
});
}
} else {
return fill(value);
}
}
// Makes all elements of the raster nodata values
void fill_with_nodata()
{
if (_meta.nodata.has_value()) {
if constexpr (raster_type_has_nan) {
fill(NaN);
} else {
fill(static_cast<T>(*_meta.nodata));
}
}
}
int32_t rows() const noexcept
{
return _meta.rows;
}
int32_t cols() const noexcept
{
return _meta.cols;
}
// mark_as_data is a no-op for DenseRaster: a cell is "data" simply by not
// holding the nodata marker -- presumably kept for API parity with raster
// types that track an explicit mask (TODO confirm).
void mark_as_data(std::size_t /*index*/) noexcept
{
}
void mark_as_data(Cell /*cell*/) noexcept
{
}
void mark_as_data(int32_t /*row*/, int32_t /*col*/) noexcept
{
}
// Store the nodata marker in the cell; requires a configured nodata value.
void mark_as_nodata(std::size_t index)
{
if (!_meta.nodata.has_value()) {
throw RuntimeError("mark_as_nodata called without nodata defined");
}
if constexpr (raster_type_has_nan) {
_data[index] = NaN;
} else {
_data[index] = static_cast<T>(*_meta.nodata);
}
}
void mark_as_nodata(int32_t row, int32_t col)
{
mark_as_nodata(index(row, col));
}
void mark_as_nodata(Cell cell)
{
mark_as_nodata(cell.r, cell.c);
}
// Value at the linear index, or an empty optional when the cell is nodata.
std::optional<value_type> optional_value(std::size_t index) const noexcept
{
    return is_nodata(index) ? std::optional<value_type>()
                            : std::optional<value_type>(_data[index]);
}
// Same as optional_value, but with the value converted to VarType.
template <typename VarType>
std::optional<VarType> optional_value_as(std::size_t index) const noexcept
{
    if (!is_nodata(index)) {
        return static_cast<VarType>(_data[index]);
    }
    return std::optional<VarType>();
}
// True when the given value is the nodata marker (NaN for floating point
// rasters, the configured nodata value otherwise).
// NOTE(review): for floating point rasters this reports NaN as nodata even
// when no nodata value is configured, unlike is_nodata(index) below which
// first checks _meta.nodata -- confirm the asymmetry is intended.
bool is_nodata_value(T value) const noexcept
{
if constexpr (raster_type_has_nan) {
return std::isnan(value);
} else {
if (_meta.nodata.has_value()) {
return value == *_meta.nodata;
} else {
return false;
}
}
}
// True when the cell at the linear index holds the nodata marker.
bool is_nodata(std::size_t index) const noexcept
{
if (_meta.nodata.has_value()) {
if constexpr (raster_type_has_nan) {
return std::isnan(_data[index]);
} else {
return _data[index] == static_cast<T>(*_meta.nodata);
}
}
return false;
}
bool is_nodata(const Cell& cell) const noexcept
{
return is_nodata(cell.r, cell.c);
}
bool is_nodata(int32_t r, int32_t c) const noexcept
{
if (_meta.nodata.has_value()) {
if constexpr (raster_type_has_nan) {
return std::isnan(_data[index(r, c)]);
} else {
return _data[index(r, c)] == static_cast<T>(*_meta.nodata);
}
}
return false;
}
// Equality within a tolerance; the metadata must match exactly.
bool tolerant_equal_to(const DenseRaster<T>& other, value_type tolerance = std::numeric_limits<value_type>::epsilon()) const noexcept
{
if (_meta != other._meta) {
return false;
}
return tolerant_data_equal_to(other, tolerance);
}
// Cell-wise comparison with a relative tolerance; nodata patterns must match
// exactly.  NOTE(review): declared noexcept but calls
// throw_on_size_mismatch, which presumably throws on mismatched sizes --
// that would std::terminate.  Confirm.
bool tolerant_data_equal_to(const DenseRaster<T>& other, value_type relTolerance = value_type(1e-05)) const noexcept
{
throw_on_size_mismatch(*this, other);
cpu::float_equal_to<T> comp(relTolerance);
const auto dataSize = size();
for (std::size_t i = 0; i < dataSize; ++i) {
if (is_nodata(i) != other.is_nodata(i)) {
return false;
}
if (!is_nodata(i) && !comp(_data[i], other[i])) {
return false;
}
}
return true;
}
/* Add the value to the cell, if the cell is nodata it will become data with the provided value */
void add_to_cell(Cell c, T value)
{
if (is_nodata(c)) {
(*this)[c] = value;
} else {
(*this)[c] += value;
}
}
// Cell-wise exact equality; nodata cells compare equal only to nodata cells.
// NOTE(review): declared noexcept while throw_on_size_mismatch may throw
// (std::terminate on mismatched sizes) -- same concern as
// tolerant_data_equal_to; confirm.
bool operator==(const DenseRaster<T>& other) const noexcept
{
throw_on_size_mismatch(*this, other);
const auto dataSize = size();
for (std::size_t i = 0; i < dataSize; ++i) {
if (is_nodata(i) != other.is_nodata(i)) {
return false;
}
if (!is_nodata(i) && (_data[i] != other[i])) {
return false;
}
}
return true;
}
bool operator!=(const DenseRaster<T>& other) const noexcept
{
return !(*this == other);
}
// Cell-wise "not equal" raster (uint8_t result) via the nodata-aware
// predicate wrapper.
DenseRaster<uint8_t> not_equals(const DenseRaster<T>& other) const noexcept
{
throw_on_size_mismatch(*this, other);
return perform_binary_operation<nodata::not_equal_to>(other);
}
template <typename TValue>
DenseRaster<uint8_t> not_equals(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
return perform_unary_operation<nodata::not_equal_to>(value);
}
// Cell-wise addition; delegation to the perform_* helpers handles nodata.
template <typename TOther>
auto operator+(const DenseRaster<TOther>& other) const
{
return perform_raster_operation<std::plus<>>(other);
}
template <typename TValue>
auto operator+(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
return perform_scalar_operation<std::plus<>>(value);
}
template <typename TValue>
DenseRaster<T>& operator+=(TValue value)
{
return perform_scalar_operation_inplace<std::plus<>>(value);
}
//! Add values of the other raster to this raster
// - Nodata values of this raster will not be assigned
// - Nodata values of the other raster will become nodata in the result
template <typename TOther>
DenseRaster<T>& operator+=(const DenseRaster<TOther>& other)
{
return perform_raster_operation_in_place<std::plus<>>(other);
}
//! Add values of the other raster to this raster
// - Nodata values of this raster will become the value in the other raster
// - Nodata values of the other raster will become nodata in the result
template <typename TOther>
DenseRaster<T>& add_or_assign(const DenseRaster<TOther>& other)
{
throw_on_size_mismatch(*this, other);
if constexpr (simd_supported() && has_nan() && DenseRaster<TOther>::has_nan() && sizeof(T) == sizeof(TOther)) {
// SIMD path (both rasters floating point, same width): lanes where this
// raster is NaN (nodata) but the other has data are zeroed first, then
// every lane with data in the other raster is accumulated.
simd::transform(cbegin(), cend(), other.cbegin(), begin(), [](auto& v1, auto& v2) {
auto w = Vc::simd_cast<Vc::Vector<T, typename std::remove_reference_t<decltype(v1)>::abi>>(v2);
auto out = v1;
out(Vc::isnan(v1) && !Vc::isnan(w)) = T(0);
out(!Vc::isnan(w)) += w;
return out;
});
} else {
const auto dataSize = size();
for (std::size_t i = 0; i < dataSize; ++i) {
if (other.is_nodata(i)) {
continue;
}
if (is_nodata(i)) {
_data[i] = static_cast<T>(other[i]);
} else {
_data[i] += static_cast<T>(other[i]);
}
}
}
return *this;
}
// Scalar variant: nodata cells are assigned the value, data cells accumulate.
template <typename TOther>
DenseRaster<T>& add_or_assign(TOther value)
{
static_assert(std::is_scalar_v<TOther>, "add_or_assign has to be called with a scalar value");
const auto val = static_cast<T>(value);
if constexpr (simd_supported() && has_nan()) {
// SIMD path: NaN (nodata) lanes get assigned, all other lanes accumulate
simd::transform(cbegin(), cend(), begin(), [val](auto& v) {
auto out = v;
out(Vc::isnan(v)) = val;
out(!Vc::isnan(v)) += val;
return out;
});
} else {
const auto dataSize = size();
for (std::size_t i = 0; i < dataSize; ++i) {
if (is_nodata(i)) {
_data[i] = val;
} else {
_data[i] += val;
}
}
}
return *this;
}
// Unary minus; nodata handling is delegated to nodata::negate.  Rejected for
// unsigned rasters, where negation makes no sense.
DenseRaster<T>
operator-() const
{
if constexpr (std::is_unsigned_v<T>) {
throw RuntimeError("Minus operator applied to unsigned value");
} else {
DenseRaster<T> result(_meta, DenseRaster<T>::data_type(_data));
std::transform(result.begin(), result.end(), result.begin(), nodata::negate<T>(_meta.nodata));
return result;
}
}
template <typename TOther>
auto operator-(const DenseRaster<TOther>& other) const
{
return perform_raster_operation<std::minus<>>(other);
}
template <typename TValue>
auto operator-(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
return perform_scalar_operation<std::minus<>>(value);
}
template <typename TValue>
DenseRaster<T>& operator-=(TValue value)
{
return perform_scalar_operation_inplace<std::minus<>>(value);
}
template <typename TOther>
DenseRaster<T>& operator-=(const DenseRaster<TOther>& other)
{
return perform_raster_operation_in_place<std::minus<>>(other);
}
template <typename TOther>
auto operator*(const DenseRaster<TOther>& other) const
{
return perform_raster_operation<std::multiplies<>>(other);
}
template <typename TValue>
auto operator*(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
return perform_scalar_operation<std::multiplies<>>(value);
}
template <typename TValue>
DenseRaster<T>& operator*=(TValue value)
{
return perform_scalar_operation_inplace<std::multiplies<>>(value);
}
template <typename TOther>
DenseRaster<T>& operator*=(const DenseRaster<TOther>& other)
{
return perform_raster_operation_in_place<std::multiplies<>>(other);
}
template <typename TOther>
auto operator/(const DenseRaster<TOther>& other) const
{
return perform_raster_operation<std::divides<>>(other);
}
// Scalar division; a zero divisor is rejected up front.
template <typename TValue>
auto operator/(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
if (value == 0) {
throw InvalidArgument("Division by zero");
}
return perform_scalar_operation<std::divides<>>(value);
}
// NOTE(review): unlike operator/ above, this in-place overload performs no
// division-by-zero check -- confirm whether that is intentional.
template <typename TValue>
DenseRaster<T>& operator/=(TValue value)
{
return perform_scalar_operation_inplace<std::divides<>>(value);
}
template <typename TOther>
DenseRaster<T>& operator/=(const DenseRaster<TOther>& other)
{
return perform_raster_operation_in_place<std::divides<>>(other);
}
// Unchecked element access by linear (row-major) index.
value_type& operator[](std::size_t i)
{
    return _data[i];
}
// Const access returns by value.
value_type operator[](std::size_t i) const
{
    return _data[i];
}
// Unchecked element access by cell coordinate.
value_type& operator[](const Cell& cell)
{
    return _data[index(cell.r, cell.c)];
}
const value_type& operator[](const Cell& cell) const
{
    return _data[index(cell.r, cell.c)];
}
// Unchecked element access by (row, column).
value_type& operator()(int32_t r, int32_t c)
{
    return _data[index(r, c)];
}
const value_type& operator()(int32_t r, int32_t c) const
{
    return _data[index(r, c)];
}
// Logical and comparison operators yield uint8_t rasters (1/0); nodata
// handling is delegated to the nodata::* predicate wrappers.
DenseRaster<uint8_t> operator!() const
{
return perform_unary_operation<nodata::logical_not>();
}
template <typename TOther>
DenseRaster<uint8_t> operator&&(const DenseRaster<TOther>& other) const
{
return perform_binary_operation<nodata::logical_and>(other);
}
// NOTE(review): overloading && and || loses built-in short-circuit
// semantics -- both operand rasters are always fully evaluated.
template <typename TOther>
DenseRaster<uint8_t> operator||(const DenseRaster<TOther>& other) const
{
return perform_binary_operation<nodata::logical_or>(other);
}
template <typename TOther>
DenseRaster<uint8_t> operator>(const DenseRaster<TOther>& other) const
{
return perform_binary_operation<nodata::greater>(other);
}
DenseRaster<uint8_t> operator>(T threshold) const
{
return perform_unary_operation<nodata::greater>(threshold);
}
template <typename TOther>
DenseRaster<uint8_t> operator>=(const DenseRaster<TOther>& other) const
{
return perform_binary_operation<nodata::greater_equal>(other);
}
DenseRaster<uint8_t> operator>=(T threshold) const
{
return perform_unary_operation<nodata::greater_equal>(threshold);
}
template <typename TOther>
DenseRaster<uint8_t> operator<(const DenseRaster<TOther>& other) const
{
return perform_binary_operation<nodata::less>(other);
}
DenseRaster<uint8_t> operator<(T threshold) const
{
return perform_unary_operation<nodata::less>(threshold);
}
template <typename TOther>
DenseRaster<uint8_t> operator<=(const DenseRaster<TOther>& other) const
{
return perform_binary_operation<nodata::less_equal>(other);
}
DenseRaster<uint8_t> operator<=(T threshold) const
{
return perform_unary_operation<nodata::less_equal>(threshold);
}
// Replace oldValue with newValue in every cell (no nodata handling).
void replace(T oldValue, T newValue) noexcept
{
std::replace(begin(), end(), oldValue, newValue);
}
// Human-readable dump: one text line per raster row, values comma separated.
// uint8_t rasters are widened to uint16_t first so values print as numbers
// rather than characters.
std::string to_string() const
{
if constexpr (std::is_same_v<uint8_t, T>) {
DenseRaster<uint16_t> copy(_meta);
std::copy(begin(), end(), copy.begin());
return copy.to_string();
} else {
std::stringstream ss;
for (int i = 0; i < rows(); ++i) {
std::span<const T> row(&_data[size_t(i) * cols()], cols());
ss << inf::str::join(row, ", ") << "\n";
}
return ss.str();
}
}
// Normalize stored nodata cells to NaN for floating point rasters so that
// is_nodata reduces to a simple isnan test.
void init_nodata_values()
{
if constexpr (raster_type_has_nan) {
if (auto nodataOpt = nodata(); nodataOpt.has_value() && !std::isnan(*nodataOpt)) {
simd::for_each(begin(), end(), [nod = *nodataOpt](auto& v) {
v(v == nod) = NaN;
});
}
}
}
// Sum of all data cells (nodata excluded), accumulated into TResult.
// NOTE(review): uses simd::for_each for every T, including types where
// simd_supported() is false -- confirm simd::for_each has a scalar fallback.
template <typename TResult = T>
TResult sum() const
{
auto result = TResult(0);
if (!nodata().has_value()) {
simd::for_each(begin(), end(), [&result](const auto& v) {
result += v.sum();
});
} else {
if constexpr (raster_type_has_nan) {
// masked sum: NaN (nodata) lanes are excluded
simd::for_each(begin(), end(), [&result](const auto& v) {
result += v.sum(!Vc::isnan(v));
});
} else {
simd::for_each(begin(), end(), [&result, nod = *nodata()](const auto& v) {
result += v.sum(v != nod);
});
}
}
return result;
}
private:
// Row-major linear index of (row, col).  The multiplication is carried out
// in std::size_t: the original int32 * int32 product overflows (UB) for
// rasters with more than 2^31-1 cells.
std::size_t index(int32_t row, int32_t col) const
{
    return static_cast<std::size_t>(row) * static_cast<std::size_t>(cols())
         + static_cast<std::size_t>(col);
}
// Throws InvalidArgument when dataSize differs from rows x cols.
static void throw_on_datasize_mismatch(int32_t rows, int32_t cols, size_t dataSize)
{
    // compute the expected size in size_t (the original wrapped the already
    // size_t-typed product in a redundant static_cast<size_t>)
    if (static_cast<size_t>(rows) * static_cast<size_t>(cols) != dataSize) {
        throw InvalidArgument("Raster data size does not match provided dimensions {} vs {}x{}", dataSize, rows, cols);
    }
}
// Both value types are SIMD-capable floating point of equal width, so the
// NaN-based SIMD kernel (fp_simd_raster_operation) can be used.
template <typename T1, typename T2>
static constexpr bool floating_point_simd_supported()
{
return DenseRaster<T1>::simd_supported() && DenseRaster<T2>::simd_supported() &&
DenseRaster<T1>::has_nan() && DenseRaster<T2>::has_nan() && sizeof(T1) == sizeof(T2);
}
// Both value types are SIMD-capable integrals of equal width, so the
// nodata-compare SIMD kernel (int_simd_raster_operation) can be used.
template <typename T1, typename T2>
static constexpr bool integral_simd_supported()
{
return DenseRaster<T1>::simd_supported() && DenseRaster<T2>::simd_supported() &&
!DenseRaster<T1>::has_nan() && !DenseRaster<T2>::has_nan() && sizeof(T1) == sizeof(T2);
}
template <typename BinaryPredicate, typename TOther, typename TResult>
void fp_simd_raster_operation(const DenseRaster<TOther>& other, DenseRaster<TResult>& result) const
{
static_assert(simd_supported() && DenseRaster<TOther>::simd_supported() && DenseRaster<TResult>::simd_supported(), "simd operation called with non supporting types");
static_assert(has_nan() && DenseRaster<TOther>::has_nan() && DenseRaster<TResult>::has_nan(), "floating point simd operation called with non floating point types");
using IsDivision = std::conditional_t<std::is_same_v<BinaryPredicate, std::divides<>>, std::true_type, std::false_type>;
simd::transform(begin(), end(), other.begin(), result.begin(), [](const auto& v1, const auto& v2) {
auto w1 = Vc::simd_cast<Vc::Vector<TResult, typename std::decay_t<decltype(v1)>::abi>>(v1);
auto w2 = Vc::simd_cast<Vc::Vector<TResult, typename std::decay_t<decltype(v2)>::abi>>(v2);
auto res = BinaryPredicate()(w1, w2);
if constexpr (IsDivision::value) {
res(w2 == 0) = DenseRaster<TResult>::NaN;
}
return res;
});
}
// Elementwise SIMD combine of two integer rasters with explicit nodata
// masking (integers have no NaN, so nodata must be handled by value).
// Requires both inputs to carry a nodata value; otherwise falls back to
// the scalar implementation. Division additionally maps zero divisors to
// the result's nodata value.
template <typename BinaryPredicate, typename TOther, typename TResult>
void int_simd_raster_operation(const DenseRaster<TOther>& other, DenseRaster<TResult>& result) const
{
    static_assert(simd_supported() && DenseRaster<TOther>::simd_supported() && DenseRaster<TResult>::simd_supported(), "simd operation called with non supporting types");
    static_assert(!has_nan() && !DenseRaster<TOther>::has_nan(), "integral simd operation called with non integral types");
    using IsDivision = std::conditional_t<std::is_same_v<BinaryPredicate, std::divides<>>, std::true_type, std::false_type>;
    if (!nodata().has_value() || !other.nodata().has_value()) {
        // fallback to non simd implementation for other combinations
        fallback_raster_operation<BinaryPredicate>(other, result);
        return;
    }
    // when result has nan, the nodata value should also be nan (this is only the case for divisions)
    if constexpr (DenseRaster<TResult>::has_nan()) {
        static_assert(IsDivision::value);
        assert(std::isnan(result.nodata().value()));
    }
    // nod: result nodata, nod1/nod2: nodata of lhs/rhs, compared per lane below
    simd::transform(begin(), end(), other.begin(), result.begin(), [nod = result.nodata().value(), nod1 = nodata().value(), nod2 = other.nodata().value()](const auto& v1, const auto& v2) {
        auto w1 = Vc::simd_cast<Vc::Vector<TResult, typename std::decay_t<decltype(v1)>::abi>>(v1);
        auto w2 = Vc::simd_cast<Vc::Vector<TResult, typename std::decay_t<decltype(v2)>::abi>>(v2);
        if constexpr (IsDivision::value) {
            // replace zero divisors with 1 so the vector divide is well
            // defined, then overwrite those lanes (and nodata lanes) with nod
            auto mask = w2 == 0;
            w2(mask) = 1;
            auto out = BinaryPredicate()(w1, w2);
            out(w1 == nod1 || w2 == nod2 || mask) = nod;
            return out;
        } else {
            // any lane where either input equals its nodata value → nodata
            auto out = BinaryPredicate()(w1, w2);
            out(w1 == nod1 || w2 == nod2) = nod;
            return out;
        }
    });
}
// Compile-time dispatch to the most specific SIMD implementation for the
// (T, TOther) pair; the two qualifying predicates are mutually exclusive
// (NaN vs no-NaN), so the check order is irrelevant.
template <typename BinaryPredicate, typename TOther, typename TResult>
void simd_raster_operation(const DenseRaster<TOther>& other, DenseRaster<TResult>& result) const
{
    if constexpr (integral_simd_supported<T, TOther>()) {
        int_simd_raster_operation<BinaryPredicate>(other, result);
    } else if constexpr (floating_point_simd_supported<T, TOther>()) {
        fp_simd_raster_operation<BinaryPredicate>(other, result);
    } else {
        fallback_raster_operation<BinaryPredicate>(other, result);
    }
}
// Scalar (non-SIMD) elementwise combine with full nodata bookkeeping.
// Used whenever the type pair does not qualify for a vectorized path.
template <typename BinaryPredicate, typename TOther, typename TResult>
auto fallback_raster_operation(const DenseRaster<TOther>& other, DenseRaster<TResult>& result) const
{
    constexpr bool isDivision = std::is_same_v<BinaryPredicate, std::divides<>>;

    if (!result.nodata().has_value()) {
        // No nodata anywhere (divisions always assign one), so a plain
        // elementwise transform suffices.
        assert(!isDivision);
        assert(!nodata().has_value() && !other.nodata().has_value());
        std::transform(cbegin(), cend(), other.cbegin(), result.begin(), [](auto& v1, auto& v2) {
            return BinaryPredicate()(static_cast<TResult>(v1), static_cast<TResult>(v2));
        });
        return;
    }

    auto nod = result.nodata().value();
    if constexpr (DenseRaster<TResult>::has_nan()) {
        // floating-point results always use NaN as the nodata marker
        nod = DenseRaster<TResult>::NaN;
    }

    for (std::size_t i = 0; i < size(); ++i) {
        if (is_nodata(i) || other.is_nodata(i)) {
            result[i] = nod;
            continue;
        }

        if constexpr (isDivision) {
            if (other[i] == 0) {
                // division by zero: flag the cell instead of computing
                result.mark_as_nodata(i);
                continue;
            }
        }

        result[i] = BinaryPredicate()(static_cast<TResult>(_data[i]), static_cast<TResult>(other[i]));
    }
}
template <typename BinaryPredicate, typename TOther, typename TResult>
void raster_operation(const DenseRaster<TOther>& other, DenseRaster<TResult>& result) const
{
constexpr bool simdSupported = simd_supported() && DenseRaster<TOther>::simd_supported();
if constexpr (simdSupported) {
simd_raster_operation<BinaryPredicate>(other, result);
} else {
// fallback to non simd implementation for other combinations
fallback_raster_operation<BinaryPredicate>(other, result);
}
}
// Compare every cell against `value` with BinaryPredicate and return a
// uint8 mask raster. When this raster carries a nodata value, the mask's
// nodata marker is 255 (uint8 max).
template <template <typename> typename BinaryPredicate, typename TOther>
DenseRaster<uint8_t> perform_unary_operation(TOther value) const
{
    DenseRaster<uint8_t> result(_meta);
    if (_meta.nodata.has_value()) {
        result.set_nodata(static_cast<double>(std::numeric_limits<uint8_t>::max()));
    }

    // predicate is nodata-aware on the left operand; the scalar has none
    auto pred = BinaryPredicate<T>(_meta.nodata, std::optional<double>());

    const auto size = result.size();
#pragma omp parallel for
    for (std::size_t i = 0; i < size; ++i) {
        result[i] = pred(_data[i], static_cast<T>(value));
    }

    return result;
}
// Apply a nodata-aware unary predicate to each cell, producing a uint8
// mask raster (nodata marker: 255 when the source raster has nodata).
template <template <typename> typename UnaryPredicate>
DenseRaster<uint8_t> perform_unary_operation() const
{
    DenseRaster<uint8_t> result(_meta);
    if (_meta.nodata) {
        result.set_nodata(static_cast<double>(std::numeric_limits<uint8_t>::max()));
    }

    UnaryPredicate<T> pred(_meta.nodata);
    std::transform(cbegin(), cend(), result.begin(), pred);
    return result;
}
// Compare two rasters cell by cell with BinaryPredicate (evaluated in the
// widest common arithmetic type) and return a uint8 mask raster. The mask
// uses 255 as nodata when either input carries a nodata value.
template <template <typename> typename BinaryPredicate, typename TOther>
DenseRaster<uint8_t> perform_binary_operation(const DenseRaster<TOther>& other) const
{
    throw_on_size_mismatch(*this, other);

    using WidestType = decltype(T() * TOther());

    DenseRaster<uint8_t> result(_meta);
    if (_meta.nodata.has_value() || other.metadata().nodata.has_value()) {
        result.set_nodata(std::numeric_limits<uint8_t>::max());
    }

    // predicate knows both inputs' nodata values
    auto pred = BinaryPredicate<WidestType>(_meta.nodata, other.metadata().nodata);

    const auto size = result.size();
#pragma omp parallel for
    for (std::size_t i = 0; i < size; ++i) {
        result[i] = pred(static_cast<WidestType>(_data[i]), static_cast<WidestType>(other[i]));
    }

    return result;
}
// Apply `BinaryPredicate(cell, scalar)` to every cell and return a new
// raster of the predicate's result type. Three paths:
//   1. scalar loop when SIMD is unsupported or the result element size
//      differs from T (vector lanes would not line up),
//   2. plain SIMD when nodata is NaN (propagates by itself) or absent,
//   3. masked SIMD that re-writes lanes equal to the nodata value.
template <typename BinaryPredicate, typename TScalar>
auto perform_scalar_operation(TScalar scalar) const
{
    using ResultType = decltype(BinaryPredicate()(T(), TScalar()));
    DenseRaster<ResultType> result(_meta);
    if constexpr (!simd_supported() || sizeof(ResultType) != sizeof(T)) {
        std::transform(begin(), end(), result.begin(), [this, scalar](T value) {
            // nodata cells are copied through unchanged
            if (is_nodata_value(value)) {
                return static_cast<ResultType>(value);
            }
            return BinaryPredicate()(value, scalar);
        });
    } else if (has_nan() || !nodata().has_value()) {
        // NaN nodata survives arithmetic, so no masking is needed
        simd::transform(begin(), end(), result.begin(), [scalar](auto v) {
            using ResultVectorType = Vc::Vector<ResultType, typename decltype(v)::abi>;
            return BinaryPredicate()(Vc::simd_cast<ResultVectorType>(v), scalar);
        });
    } else {
        assert(nodata().has_value());
        // integral nodata: restore nodata lanes after the vector operation
        simd::transform(begin(), end(), result.begin(), [scalar, nod = *nodata()](auto v) {
            using ResultVectorType = Vc::Vector<ResultType, typename decltype(v)::abi>;
            auto w = Vc::simd_cast<ResultVectorType>(v);
            auto out = BinaryPredicate()(w, scalar);
            out(w == nod) = nod;
            return out;
        });
    }
    return result;
}
// Apply `BinaryPredicate(cell, scalar)` to every cell of this raster in
// place; the element type does not change. Mirrors the three code paths of
// perform_scalar_operation (scalar loop / plain SIMD / masked SIMD).
// NOTE(review): in the NaN branch the predicate is also applied to NaN
// cells — relies on NaN propagating through the operation; confirm for
// every predicate used here.
template <typename BinaryPredicate, typename TScalar>
DenseRaster<T>& perform_scalar_operation_inplace(TScalar scalar)
{
    static_assert(std::is_scalar_v<TScalar>, "Arithmetic operation called with non scalar type");
    if constexpr (!simd_supported()) {
        std::for_each(begin(), end(), [this, scalar](T& value) {
            // nodata cells stay untouched
            if (is_nodata_value(value)) {
                return;
            }
            value = BinaryPredicate()(value, scalar);
        });
    } else if (has_nan() || !nodata().has_value()) {
        simd::for_each(begin(), end(), [scalar](auto& value) {
            value = BinaryPredicate()(value, scalar);
        });
    } else {
        assert(nodata().has_value());
        // integral nodata: only write back lanes that are not nodata
        simd::for_each(begin(), end(), [scalar, nod = *nodata()](auto& value) {
            value(value != nod) = BinaryPredicate()(value, scalar);
        });
    }
    return *this;
}
// In-place raster-vs-raster operation: the result overwrites *this.
// Division is special: divide-by-zero must be representable as nodata, so
// a nodata value is assigned if the metadata does not already have one.
template <typename BinaryPredicate, typename TOther>
DenseRaster<T>& perform_raster_operation_in_place(const DenseRaster<TOther>& other)
{
    throw_on_size_mismatch(*this, other);
    detail::assign_nodata_value(*this, other, *this);

    if constexpr (std::is_same_v<BinaryPredicate, std::divides<>>) {
        if (!_meta.nodata.has_value()) {
            _meta.nodata = detail::nodata_for_type<T>();
        }
    }

    raster_operation<BinaryPredicate>(other, *this);
    return *this;
}
// Raster-vs-raster operation producing a new raster. The result element
// type is the natural promotion of T and TOther, except for division,
// which always yields a floating-point raster with NaN as its nodata.
template <typename BinaryPredicate, typename TOther>
auto perform_raster_operation(const DenseRaster<TOther>& other) const
{
    throw_on_size_mismatch(*this, other);

    constexpr bool isDivision = std::is_same_v<BinaryPredicate, std::divides<>>;
    using DivType = decltype(BinaryPredicate()(1.f, std::common_type_t<T, TOther>()));
    using Type    = decltype(BinaryPredicate()(T(), TOther()));
    using TResult = std::conditional_t<isDivision, DivType, Type>;

    DenseRaster<TResult> result(_meta);
    if constexpr (isDivision) {
        result.set_nodata(DenseRaster<TResult>::NaN);
    } else {
        result.set_nodata(result.nodata().has_value() ? result.nodata() : result.nodata()), detail::assign_nodata_value(*this, other, result);
    }

    raster_operation<BinaryPredicate>(other, result);
    return result;
}
RasterMetadata _meta; // raster extent/geo metadata and optional nodata value
data_type _data;      // contiguous cell storage
};
// scalar + raster: addition is commutative, so delegate to the
// raster-first overload.
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
DenseRaster<T> operator+(TScalar lhs, const DenseRaster<T>& rhs)
{
    return rhs + lhs;
}
// scalar - raster (nodata-aware). Subtraction is not commutative, so a
// dedicated scalar-first functor is used instead of delegating.
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
auto operator-(TScalar value, const DenseRaster<T>& rhs)
{
    using ResultType = decltype(TScalar() - T());

    DenseRaster<ResultType> result(rhs.metadata());
    auto op = nodata::minus_scalar_first<ResultType>(rhs.metadata().nodata, static_cast<ResultType>(value));
    std::transform(begin(rhs), end(rhs), begin(result), op);
    return result;
}
// scalar * raster: multiplication is commutative, so delegate to the
// raster-first overload.
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
DenseRaster<T> operator*(TScalar lhs, const DenseRaster<T>& rhs)
{
    return rhs * lhs;
}
// scalar / raster, elementwise. Cells where the raster value is zero are
// marked as nodata; if the raster metadata provides no nodata value such a
// cell cannot be represented and InvalidArgument is thrown.
// NOTE(review): cells holding the raster's nodata value are divided like
// regular values here — confirm whether they should propagate as nodata.
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
auto operator/(TScalar scalar, const DenseRaster<T>& rhs)
{
    static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
    // promote to floating point so integer rasters divide correctly
    using ResultType = decltype(1.0f * T());

    DenseRaster<ResultType> result(rhs.metadata());
    for (std::size_t i = 0; i < rhs.size(); ++i) {
        auto value = rhs[i];
        if (value == 0) {
            if (!result.nodata().has_value()) {
                throw InvalidArgument("Division by raster that contains 0 values");
            }
            result.mark_as_nodata(i);
        } else {
            result[i] = scalar / static_cast<ResultType>(value);
        }
    }

    return result;
}
// Free-function cbegin for ADL-based generic code. Returns the raster's
// const iterator via the member cbegin(); the previous implementation
// returned ras.data() (a raw pointer), which was inconsistent with the
// free cend() overload that returns the member const iterator.
template <typename T>
auto cbegin(const DenseRaster<T>& ras)
{
    return ras.cbegin();
}
// Free-function cend for ADL-based generic code.
template <typename T>
auto cend(const DenseRaster<T>& ras)
{
    return ras.cend();
}
// Free-function begin (mutable) for ADL-based generic code.
template <typename T>
auto begin(DenseRaster<T>& ras)
{
    return ras.begin();
}
// Free-function begin (const) for ADL-based generic code.
template <typename T>
auto begin(const DenseRaster<T>& ras)
{
    return ras.begin();
}
// Free-function end (mutable) for ADL-based generic code.
template <typename T>
auto end(DenseRaster<T>& ras)
{
    return ras.end();
}
// Free-function end (const) for ADL-based generic code; returns the
// member const iterator.
template <typename T>
auto end(const DenseRaster<T>& ras)
{
    return ras.cend();
}
// Free-function data (const): raw read-only pointer to the cell storage.
template <typename T>
const T* data(const DenseRaster<T>& ras)
{
    return ras.data();
}
// Free-function data (mutable): raw pointer to the cell storage.
template <typename T>
T* data(DenseRaster<T>& ras)
{
    return ras.data();
}
// Free-function size: number of cells in the raster.
template <typename T>
auto size(const DenseRaster<T>& ras)
{
    return ras.size();
}
}
|
ocp_nlp_sqp_rti.c | /*
* Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
* Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
* Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
* Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
*
* This file is part of acados.
*
* The 2-Clause BSD License
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.;
*/
#include "acados/ocp_nlp/ocp_nlp_sqp_rti.h"
// external
#include <assert.h>
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif
// blasfeo
#include "blasfeo/include/blasfeo_d_aux.h"
#include "blasfeo/include/blasfeo_d_aux_ext_dep.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// acados
#include "acados/ocp_nlp/ocp_nlp_common.h"
#include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h"
#include "acados/ocp_nlp/ocp_nlp_reg_common.h"
#include "acados/ocp_qp/ocp_qp_common.h"
#include "acados/utils/mem.h"
#include "acados/utils/print.h"
#include "acados/utils/timing.h"
#include "acados/utils/types.h"
#include "acados_c/ocp_qp_interface.h"
/************************************************
* options
************************************************/
/* Number of bytes needed for an ocp_nlp_sqp_rti_opts structure,
 * including the embedded nlp-level options. */
int ocp_nlp_sqp_rti_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int size = sizeof(ocp_nlp_sqp_rti_opts);
    size += ocp_nlp_opts_calculate_size(config, dims);

    return size;
}
/* Carve an ocp_nlp_sqp_rti_opts struct out of pre-allocated raw_memory and
 * place the nested nlp options directly behind it. The layout must mirror
 * ocp_nlp_sqp_rti_opts_calculate_size exactly. */
void *ocp_nlp_sqp_rti_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    char *c_ptr = (char *) raw_memory;
    /* opts struct itself */
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_rti_opts);
    /* nested nlp-level options */
    opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_opts_calculate_size(config, dims);
    /* verify we stayed within the allocated block */
    assert((char *) raw_memory + ocp_nlp_sqp_rti_opts_calculate_size(config,
        dims) >= c_ptr);
    return opts;
}
/* Fill the RTI options with defaults. The nested nlp options must be
 * initialized first; the RTI-specific fields only extend them. */
void ocp_nlp_sqp_rti_opts_initialize_default(void *config_,
    void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_constraints_config **constraints = config->constraints;
    // int ii;
    // int N = dims->N;
    // this first !!!
    ocp_nlp_opts_initialize_default(config, dims, nlp_opts);
    // SQP RTI opts
    opts->ext_qp_res = 0;            // external QP residual computation off
    opts->warm_start_first_qp = false;
    opts->rti_phase = 0;             // 0: preparation + feedback in one call
    opts->print_level = 0;
    // overwrite default submodules opts
    // do not compute adjoint in dynamics and constraints
    // int compute_adj = 0;
    // // dynamics
    // for (ii = 0; ii < N; ii++)
    // {
    //     dynamics[ii]->opts_set(dynamics[ii],
    //         opts->nlp_opts->dynamics[ii], "compute_adj", &compute_adj);
    // }
    // // constraints
    // for (ii = 0; ii <= N; ii++)
    // {
    //     constraints[ii]->opts_set(constraints[ii],
    //         opts->nlp_opts->constraints[ii], "compute_adj", &compute_adj);
    // }
    return;
}
/* Propagate option changes down to the nested nlp options. */
void ocp_nlp_sqp_rti_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    ocp_nlp_opts_update(config, dims, opts->nlp_opts);

    return;
}
/* Set a solver option by name.
 * Fields whose text before the first '_' names a submodule (e.g. "qp_...")
 * are forwarded to that submodule; remaining fields are either RTI-specific
 * options handled here or passed on to the generic nlp options.
 * Fixes: the rti_phase error message previously named ocp_nlp_sqp_opts_set;
 * the module-name copy is now clamped to the buffer size. */
void ocp_nlp_sqp_rti_opts_set(void *config_, void *opts_,
    const char *field, void* value)
{
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;
    ocp_nlp_config *config = config_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    int ii;
    char module[MAX_STR_LEN];
    char *ptr_module = NULL;
    int module_length = 0;
    // extract module name (text before the first '_')
    char *char_ = strchr(field, '_');
    if (char_!=NULL)
    {
        module_length = char_-field;
        // clamp to the buffer size to avoid overflow on long prefixes
        if (module_length > MAX_STR_LEN-1)
            module_length = MAX_STR_LEN-1;
        for (ii=0; ii<module_length; ii++)
            module[ii] = field[ii];
        module[module_length] = '\0'; // add end of string
        ptr_module = module;
    }
    // pass options to QP module
    if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) )
    {
        ocp_nlp_opts_set(config, nlp_opts, field, value);
        // keep a local copy of the warm start mode; it is needed to
        // override warm starting for the very first QP solve
        if (!strcmp(field, "qp_warm_start"))
        {
            int* i_ptr = (int *) value;
            opts->qp_warm_start = *i_ptr;
        }
    }
    else // nlp opts
    {
        if (!strcmp(field, "ext_qp_res"))
        {
            int* ext_qp_res = (int *) value;
            opts->ext_qp_res = *ext_qp_res;
        }
        else if (!strcmp(field, "warm_start_first_qp"))
        {
            bool* warm_start_first_qp = (bool *) value;
            opts->warm_start_first_qp = *warm_start_first_qp;
        }
        else if (!strcmp(field, "rti_phase"))
        {
            int* rti_phase = (int *) value;
            if (*rti_phase < 0 || *rti_phase > 2) {
                printf("\nerror: ocp_nlp_sqp_rti_opts_set: invalid value for rti_phase field.");
                printf("possible values are: 0, 1, 2\n");
                exit(1);
            } else opts->rti_phase = *rti_phase;
        }
        else if (!strcmp(field, "print_level"))
        {
            int* print_level = (int *) value;
            if (*print_level < 0)
            {
                printf("\nerror: ocp_nlp_sqp_rti_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level);
                exit(1);
            }
            opts->print_level = *print_level;
        }
        else
        {
            // unknown here: let the generic nlp options handle it
            ocp_nlp_opts_set(config, nlp_opts, field, value);
        }
    }
    return;
}
/* Set a stage-wise option by forwarding to the nested nlp options. */
void ocp_nlp_sqp_rti_opts_set_at_stage(void *config_, void *opts_,
    int stage, const char *field, void* value)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_;

    ocp_nlp_opts_set_at_stage(config, opts->nlp_opts, stage, field, value);

    return;
}
/************************************************
* memory
************************************************/
/* Number of bytes needed for the RTI memory: the memory struct, the nlp
 * memory, and the statistics table (stat_m rows x stat_n columns, plus
 * 4 extra columns when external QP residuals are enabled). */
int ocp_nlp_sqp_rti_memory_calculate_size(void *config_,
    void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_cost_config **cost = config->cost;
    // ocp_nlp_constraints_config **constraints = config->constraints;
    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;
    int size = 0;
    size += sizeof(ocp_nlp_sqp_rti_memory);
    // nlp mem
    size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);
    // stat: must match the dimensions set in ocp_nlp_sqp_rti_memory_assign
    int stat_m = 1+1;
    int stat_n = 2;
    if (opts->ext_qp_res)
        stat_n += 4;
    size += stat_n*stat_m*sizeof(double);
    size += 8; // initial align
    make_int_multiple_of(8, &size);
    return size;
}
/* Lay the RTI memory out inside raw_memory. The layout must mirror
 * ocp_nlp_sqp_rti_memory_calculate_size exactly. */
void *ocp_nlp_sqp_rti_memory_assign(void *config_, void *dims_,
    void *opts_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    // ocp_nlp_dynamics_config **dynamics = config->dynamics;
    // ocp_nlp_cost_config **cost = config->cost;
    // ocp_nlp_constraints_config **constraints = config->constraints;
    char *c_ptr = (char *) raw_memory;
    // int ii;
    // int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;
    // initial align
    align_char_to(8, &c_ptr);
    ocp_nlp_sqp_rti_memory *mem = (ocp_nlp_sqp_rti_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_rti_memory);
    // nlp mem
    mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr);
    c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts);
    // stat: statistics table; dimensions must match the size calculation
    mem->stat = (double *) c_ptr;
    mem->stat_m = 1+1;
    mem->stat_n = 2;
    if (opts->ext_qp_res)
        mem->stat_n += 4;
    c_ptr += mem->stat_m*mem->stat_n*sizeof(double);
    mem->status = ACADOS_READY;
    /* verify we stayed within the allocated block */
    assert((char *) raw_memory+ocp_nlp_sqp_rti_memory_calculate_size(
        config, dims, opts) >= c_ptr);
    return mem;
}
/************************************************
* workspace
************************************************/
/* Bytes required for the RTI workspace: the workspace struct, the nlp
 * workspace, temporary QP in/out copies, and (optionally) QP residual
 * storage when external QP residual computation is enabled. */
int ocp_nlp_sqp_rti_workspace_calculate_size(void *config_,
    void *dims_, void *opts_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    int size = sizeof(ocp_nlp_sqp_rti_workspace);

    // nlp workspace
    size += ocp_nlp_workspace_calculate_size(config, dims, opts->nlp_opts);

    // temporary qp in and qp out
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // qp residuals and residual workspace (debugging feature)
    if (opts->ext_qp_res)
    {
        size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
        size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims);
    }

    return size;
}
/* Set up the workspace pointers inside the raw workspace buffer. The
 * layout must mirror ocp_nlp_sqp_rti_workspace_calculate_size exactly. */
static void ocp_nlp_sqp_rti_cast_workspace(
    ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_sqp_rti_opts *opts,
    ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_workspace *work)
{
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    // sqp
    char *c_ptr = (char *) work;
    c_ptr += sizeof(ocp_nlp_sqp_rti_workspace);
    // nlp
    work->nlp_work = ocp_nlp_workspace_assign(
        config, dims, nlp_opts, nlp_mem, c_ptr);
    c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts);
    // qp in
    work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);
    // qp out
    work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);
    /* qp residual storage only exists when external residuals are enabled */
    if (opts->ext_qp_res)
    {
        // qp res
        work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims);
        // qp res ws
        work->qp_res_ws = ocp_qp_res_workspace_assign(
            dims->qp_solver->orig_dims, c_ptr);
        c_ptr += ocp_qp_res_workspace_calculate_size(
            dims->qp_solver->orig_dims);
    }
    /* verify we stayed within the allocated block */
    assert((char *) work + ocp_nlp_sqp_rti_workspace_calculate_size(config,
        dims, opts) >= c_ptr);
    return;
}
/************************************************
* functions
************************************************/
/* Main RTI entry point. Depending on opts->rti_phase, run the preparation
 * phase, the feedback phase, or both, and record the total wall time.
 * rti_phase: 0 = both, 1 = preparation only, 2 = feedback only. */
int ocp_nlp_sqp_rti(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
    void *opts_, void *mem_, void *work_)
{
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_sqp_rti_opts *nlp_opts = opts_;

    int rti_phase = nlp_opts->rti_phase;

    // zero timers
    acados_timer timer0;
    mem->time_tot = 0.0;
    acados_tic(&timer0);

    if (rti_phase == 0 || rti_phase == 1)
    {
        ocp_nlp_sqp_rti_preparation_step(
            config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_);
    }
    if (rti_phase == 0 || rti_phase == 2)
    {
        ocp_nlp_sqp_rti_feedback_step(
            config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_);
    }

    double total_time = acados_toc(&timer0);
    mem->time_tot = total_time;
    nlp_out->total_time = total_time;

    return mem->status;
}
/* RTI preparation phase: wire the submodule memories to the current
 * iterate and QP matrices, set the integration horizon per stage, and
 * linearize the NLP (fill the QP matrices). Can run before the new state
 * measurement is available; the feedback phase then solves the QP. */
void ocp_nlp_sqp_rti_preparation_step(void *config_, void *dims_,
    void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    acados_timer timer1;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;
    /* linearization/regularization timers are zeroed here and accumulated
     * across preparation + feedback of one RTI cycle */
    mem->time_lin = 0.0;
    mem->time_reg = 0.0;
    int N = dims->N;
    int ii;
#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->nlp_opts->num_threads);
    #pragma omp parallel
    { // beginning of parallel region
#endif
    // alias to dynamics_memory: point each dynamics module at the current
    // iterate, multipliers, and the QP matrices it must write
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(
            nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux_ptr(
            nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(
            nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_ux1_ptr(
            nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(
            nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_tmp_pi_ptr(
            nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(
            nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(
            nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(
            nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_sim_guess_ptr(
            nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii,
            nlp_mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(
            nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]);
    }
    // alias to cost_memory: same wiring for the cost modules (stages 0..N)
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(
            nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_tmp_ux_ptr(
            nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(
            nlp_mem->z_alg+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(
            nlp_mem->dzduxt+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(
            nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(
            nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]);
    }
    // alias to constraints_memory: same wiring for constraint modules
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(
            nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_ux_ptr(
            nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(
            nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_tmp_lam_ptr(
            nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_z_alg_ptr(
            nlp_mem->z_alg+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_dzdux_tran_ptr(
            nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(
            nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(
            nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(
            nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_rev_ptr(
            nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxe_ptr(
            nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]);
    }
    // alias to regularize memory: the regularization module reads/writes
    // the QP matrices and the QP solution in place
    config->regularize->memory_set_RSQrq_ptr(
        dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(
        dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(
        dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem);
    config->regularize->memory_set_b_ptr(
        dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(
        dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(
        dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(
        dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(
        dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(
        dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem);
    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for nowait
#endif
    // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute;
    // -> remove here and make sure precompute is called everywhere (e.g. Python interface).
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
            nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }
#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif
    // initialize QP
    ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
    /* SQP body */
    // NOTE(review): sqp_iter is a stack variable whose address is stored in
    // nlp_mem; it dangles after this function returns — confirm it is only
    // read during this call.
    int sqp_iter = 0;
    nlp_mem->sqp_iter = &sqp_iter;
    // linearizate NLP and update QP matrices
    acados_tic(&timer1);
    ocp_nlp_approximate_qp_matrices(config, dims, nlp_in,
        nlp_out, nlp_opts, nlp_mem, nlp_work);
    mem->time_lin += acados_toc(&timer1);
#if defined(ACADOS_WITH_OPENMP)
    // restore number of threads
    omp_set_num_threads(num_threads_bkp);
#endif
    return;
}
/* RTI feedback phase: embed the new initial state, finish the QP vectors,
 * regularize the Hessian, solve the QP, and apply the resulting step to
 * the NLP iterate. Assumes the preparation phase has already run (the
 * time_lin/time_reg timers are zeroed there and accumulated here).
 * Fix: the QP-status check now uses logical && instead of bitwise &. */
void ocp_nlp_sqp_rti_feedback_step(void *config_, void *dims_,
    void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    acados_timer timer1;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_opts *nlp_opts = opts->nlp_opts;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;
    int qp_iter = 0;
    int qp_status = 0;
    double tmp_time;
    // QP-related timers are local to the feedback phase
    mem->time_qp_sol = 0.0;
    mem->time_qp_solver_call = 0.0;
    mem->time_qp_xcond = 0.0;
    // embed initial value (this actually updates all bounds at stage 0...)
    ocp_nlp_embed_initial_value(config, dims, nlp_in,
        nlp_out, nlp_opts, nlp_mem, nlp_work);
    // update QP rhs for SQP (step prim var, abs dual var)
    ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in,
        nlp_out, nlp_opts, nlp_mem, nlp_work);
    // regularize Hessian
    acados_tic(&timer1);
    config->regularize->regularize_hessian(config->regularize,
        dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem);
    mem->time_reg += acados_toc(&timer1);
    if (opts->print_level > 0) {
        printf("\n------- qp_in --------\n");
        print_ocp_qp_in(nlp_mem->qp_in);
    }
    // optionally solve the first QP cold, regardless of the configured
    // warm start mode (the stored qp_warm_start is restored elsewhere)
    if (!opts->warm_start_first_qp)
    {
        int tmp_int = 0;
        config->qp_solver->opts_set(config->qp_solver,
            opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int);
    }
    // solve qp
    acados_tic(&timer1);
    qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver,
        nlp_mem->qp_in, nlp_mem->qp_out, opts->nlp_opts->qp_solver_opts,
        nlp_mem->qp_solver_mem, nlp_work->qp_work);
    mem->time_qp_sol += acados_toc(&timer1);
    qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time);
    mem->time_qp_solver_call += tmp_time;
    qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time);
    mem->time_qp_xcond += tmp_time;
    // compute correct dual solution in case of Hessian regularization
    acados_tic(&timer1);
    config->regularize->correct_dual_sol(config->regularize,
        dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem);
    mem->time_reg += acados_toc(&timer1);
    // TODO move into QP solver memory ???
    qp_info *qp_info_;
    ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_);
    nlp_out->qp_iter = qp_info_->num_iter;
    qp_iter = qp_info_->num_iter;
    // compute external QP residuals (for debugging)
    if (opts->ext_qp_res)
    {
        ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out,
            work->qp_res, work->qp_res_ws);
        ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*1+2));
        // printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter,
        //     inf_norm_qp_res[0], inf_norm_qp_res[1],
        //     inf_norm_qp_res[2], inf_norm_qp_res[3]);
    }
    // save statistics
    mem->stat[mem->stat_n*1+0] = qp_status;
    mem->stat[mem->stat_n*1+1] = qp_iter;
    // treat MAXITER as usable: the (possibly inexact) step is still applied
    if ((qp_status!=ACADOS_SUCCESS) && (qp_status!=ACADOS_MAXITER))
    {
        // print_ocp_qp_in(mem->qp_in);
#ifndef ACADOS_SILENT
        printf("QP solver returned error status %d\n", qp_status);
#endif
        mem->status = ACADOS_QP_FAILURE;
        return;
    }
    // apply the full QP step to the NLP iterate
    ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work);
    // ocp_nlp_dims_print(nlp_out->dims);
    // ocp_nlp_out_print(nlp_out);
    mem->status = ACADOS_SUCCESS;
}
/* One-time precompute: sanity-check the slack dimensions against the
 * constraint modules, set the per-stage integration horizon T, and run
 * each dynamics module's precompute. Returns ACADOS_SUCCESS or the first
 * failing module's status. */
int ocp_nlp_sqp_rti_precompute(void *config_, void *dims_, void *nlp_in_,
    void *nlp_out_, void *opts_, void *mem_, void *work_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    // ocp_nlp_out *nlp_out = nlp_out_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;
    int N = dims->N;
    int status = ACADOS_SUCCESS;
    int ii;
    // TODO(giaf) flag to enable/disable checks
    for (ii = 0; ii <= N; ii++)
    {
        int module_val;
        config->constraints[ii]->dims_get(config->constraints[ii],
            dims->constraints[ii], "ns", &module_val);
        /* dimension mismatch is a setup error: abort immediately */
        if (dims->ns[ii] != module_val)
        {
            printf("ocp_nlp_sqp_rti_precompute: inconsistent dimension ns \
for stage %d with constraint module, got %d, module: %d.",
                ii, dims->ns[ii], module_val);
            exit(1);
        }
    }
    // precompute
    for (ii = 0; ii < N; ii++)
    {
        // set T
        config->dynamics[ii]->model_set(config->dynamics[ii],
            dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
        // dynamics precompute
        status = config->dynamics[ii]->precompute(config->dynamics[ii],
            dims->dynamics[ii], nlp_in->dynamics[ii],
            opts->nlp_opts->dynamics[ii],
            nlp_mem->dynamics[ii],
            nlp_work->dynamics[ii]);
        if (status != ACADOS_SUCCESS)
            return status;
    }
    return status;
}
/**
 * Parameter-sensitivity evaluation for the SQP-RTI solver.
 *
 * Supports only field "ex" at stage 0: copies the last QP, zeroes its RHS,
 * sets a unit seed on lbx/ubx at `index`, re-solves the sensitivity QP, and
 * copies the resulting (ux, pi, lam, t) directions into sens_nlp_out.
 * Any other (field, stage) combination is a hard error (prints and exits).
 */
void ocp_nlp_sqp_rti_eval_param_sens(void *config_, void *dims_, void *opts_,
    void *mem_, void *work_, char *field, int stage, int index,
    void *sens_nlp_out_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;
    ocp_nlp_sqp_rti_memory *mem = mem_;
    ocp_nlp_memory *nlp_mem = mem->nlp_mem;
    ocp_nlp_out *sens_nlp_out = sens_nlp_out_;

    ocp_nlp_sqp_rti_workspace *work = work_;
    ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work);
    ocp_nlp_workspace *nlp_work = work->nlp_work;

    // sensitivity QP: same matrices as the last QP, zero right-hand side
    d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in);
    d_ocp_qp_set_rhs_zero(work->tmp_qp_in);

    double one = 1.0;

    // logical && (was bitwise &): short-circuits and states intent
    if ((!strcmp("ex", field)) && (stage == 0))
    {
        // unit seed on the initial-state bounds at the requested index
        d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in);
        d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in);
        // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in);
        config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver,
            work->tmp_qp_in, work->tmp_qp_out, opts->nlp_opts->qp_solver_opts,
            nlp_mem->qp_solver_mem, nlp_work->qp_work);
        // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out);
        // exit(1);

        /* copy tmp_qp_out into sens_nlp_out */
        int i;
        int N = dims->N;
        int *nv = dims->nv;
        int *nx = dims->nx;
        // int *nu = dims->nu;
        int *ni = dims->ni;
        // int *nz = dims->nz;
        for (i = 0; i <= N; i++)
        {
            blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0,
                sens_nlp_out->ux + i, 0);
            // costates exist only for stages 0..N-1
            if (i < N)
                blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0,
                    sens_nlp_out->pi + i, 0);
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0,
                sens_nlp_out->lam + i, 0);
            blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0,
                sens_nlp_out->t + i, 0);
        }
    }
    else
    {
        // adjacent literals instead of a backslash continuation: the old
        // form embedded source indentation in the printed message
        printf("\nerror: field %s at stage %d not available in "
            "ocp_nlp_sqp_rti_eval_param_sens\n", field, stage);
        exit(1);
    }
    return;
}
// TODO rename memory_get ???
/**
 * Generic getter for the SQP-RTI solver. Dispatches on the string `field`
 * and writes the result through `return_value_` (the caller must pass a
 * pointer of the documented type). Unknown fields are a hard error.
 */
void ocp_nlp_sqp_rti_get(void *config_, void *dims_, void *mem_,
    const char *field, void *return_value_)
{
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_sqp_rti_memory *mem = mem_;

    if (!strcmp("sqp_iter", field))
    {
        // RTI always performs exactly one linearization + QP solve
        int *value = return_value_;
        *value = 1;
    }
    else if (!strcmp("status", field))
    {
        int *value = return_value_;
        *value = mem->status;
    }
    else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
    {
        double *value = return_value_;
        *value = mem->time_tot;
    }
    else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_sol;
    }
    else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_solver_call;
    }
    else if (!strcmp("time_qp_xcond", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_xcond;
    }
    else if (!strcmp("time_lin", field))
    {
        double *value = return_value_;
        *value = mem->time_lin;
    }
    else if (!strcmp("time_reg", field))
    {
        double *value = return_value_;
        *value = mem->time_reg;
    }
    else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) || !strcmp("time_sim_la", field))
    {
        // sum the requested integrator timing over all stages.
        // BUGFIX: initialize the output before accumulating — previously
        // whatever value the caller had in *ptr was silently added in.
        double tmp = 0.0;
        double *ptr = return_value_;
        int N = dims->N;
        int ii;
        *ptr = 0.0;
        for (ii=0; ii<N; ii++)
        {
            config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], field, &tmp);
            *ptr += tmp;
        }
    }
    else if (!strcmp("stat", field))
    {
        double **value = return_value_;
        *value = mem->stat;
    }
    else if (!strcmp("statistics", field))
    {
        // column-major export: first column is the iteration index, the
        // remaining columns transpose the stat matrix rows.
        // n_row == 2: RTI logs at most two rows — TODO confirm vs stat_m
        int n_row = 2;
        double *value = return_value_;
        for (int ii=0; ii<n_row; ii++)
        {
            value[ii+0] = ii;
            for (int jj=0; jj<mem->stat_n; jj++)
                value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n];
        }
    }
    else if (!strcmp("stat_m", field))
    {
        int *value = return_value_;
        *value = mem->stat_m;
    }
    else if (!strcmp("stat_n", field))
    {
        int *value = return_value_;
        *value = mem->stat_n;
    }
    else if (!strcmp("nlp_mem", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem;
    }
    else if (!strcmp("qp_xcond_dims", field))
    {
        void **value = return_value_;
        *value = dims->qp_solver->xcond_dims;
    }
    else if (!strcmp("nlp_res", field))
    {
        ocp_nlp_res **value = return_value_;
        *value = mem->nlp_mem->nlp_res;
    }
    else if (!strcmp("qp_xcond_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in;
    }
    else if (!strcmp("qp_xcond_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out;
    }
    else if (!strcmp("qp_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_in;
    }
    else if (!strcmp("qp_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_out;
    }
    else if (!strcmp("qp_iter", field))
    {
        // forwarded to the QP solver's own getter
        config->qp_solver->memory_get(config->qp_solver,
            mem->nlp_mem->qp_solver_mem, "iter", return_value_);
    }
    else if (!strcmp("res_stat", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_stat;
    }
    else if (!strcmp("res_eq", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_eq;
    }
    else if (!strcmp("res_ineq", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_ineq;
    }
    else if (!strcmp("res_comp", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_comp;
    }
    else if (!strcmp("cost_value", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->cost_value;
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_rti_get\n", field);
        exit(1);
    }
}
/**
 * Options getter for the SQP-RTI solver. The only exposed field is the
 * nested "nlp_opts" pointer; anything else is a hard error.
 */
void ocp_nlp_sqp_rti_opts_get(void *config_, void *dims_, void *opts_,
    const char *field, void *return_value_)
{
    // ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    if (strcmp(field, "nlp_opts") == 0)
    {
        void **ret = return_value_;
        *ret = opts->nlp_opts;
        return;
    }

    printf("\nerror: field %s not available in ocp_nlp_sqp_rti_opts_get\n", field);
    exit(1);
}
/**
 * Workspace getter for the SQP-RTI solver. The only exposed field is the
 * nested "nlp_work" pointer; anything else is a hard error.
 */
void ocp_nlp_sqp_rti_work_get(void *config_, void *dims_, void *work_,
    const char *field, void *return_value_)
{
    // ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_workspace *work = work_;

    if (strcmp(field, "nlp_work") == 0)
    {
        void **ret = return_value_;
        *ret = work->nlp_work;
        return;
    }

    printf("\nerror: field %s not available in ocp_nlp_sqp_rti_work_get\n", field);
    exit(1);
}
/**
 * Fill an ocp_nlp_config function-pointer table with the SQP-RTI
 * implementations of the NLP-solver interface.
 */
void ocp_nlp_sqp_rti_config_initialize_default(void *config_)
{
    ocp_nlp_config *cfg = (ocp_nlp_config *) config_;

    // options
    cfg->opts_calculate_size = ocp_nlp_sqp_rti_opts_calculate_size;
    cfg->opts_assign = ocp_nlp_sqp_rti_opts_assign;
    cfg->opts_initialize_default = ocp_nlp_sqp_rti_opts_initialize_default;
    cfg->opts_update = ocp_nlp_sqp_rti_opts_update;
    cfg->opts_set = ocp_nlp_sqp_rti_opts_set;
    cfg->opts_set_at_stage = ocp_nlp_sqp_rti_opts_set_at_stage;
    // memory & workspace
    cfg->memory_calculate_size = ocp_nlp_sqp_rti_memory_calculate_size;
    cfg->memory_assign = ocp_nlp_sqp_rti_memory_assign;
    cfg->workspace_calculate_size = ocp_nlp_sqp_rti_workspace_calculate_size;
    // solver entry points
    cfg->evaluate = ocp_nlp_sqp_rti;
    cfg->eval_param_sens = ocp_nlp_sqp_rti_eval_param_sens;
    cfg->config_initialize_default = ocp_nlp_sqp_rti_config_initialize_default;
    cfg->precompute = ocp_nlp_sqp_rti_precompute;
    // getters
    cfg->get = ocp_nlp_sqp_rti_get;
    cfg->opts_get = ocp_nlp_sqp_rti_opts_get;
    cfg->work_get = ocp_nlp_sqp_rti_work_get;
    return;
}
|
GraphBuilder.h | /*
* GraphBuilder.h
*
* Created on: 15.07.2014
* Author: Marvin Ritter (marvin.ritter@gmail.com)
*/
#ifndef GRAPH_BUILDER_H
#define GRAPH_BUILDER_H
#include <vector>
#include "../Globals.h"
#include "Graph.h"
namespace NetworKit {
/*
 * The GraphBuilder helps to speed up graph generation by minimizing the number of checks on addEdge/setWeight/increaseWeight. Furthermore, it delays the construction of some internal data structures of the Graph class until you call toGraph(). toGraph() can only be called once.
 * In the Graph class, for an edge u -> v, v is stored in the adjacency array of u (first half) and u in the adjacency array of v (second half). (For directed graphs these might be in- and out-adjacency arrays.) So each edge can be seen as a pair of 2 half edges. To allow optimization, and mainly parallelization, GraphBuilder lets you add both half edges yourself. You are responsible for adding both half edges; otherwise you might end up with an invalid Graph object.
 * As adding the first half edge of an edge u -> v only requires access to the adjacency array of u, other threads can add edges a -> b as long as a != u. The same goes for the methods setWeight and increaseWeight. Note: if you add the first half edge of u -> v, you can change the weight by calling setWeight(u, v, ew) or increaseWeight(u, v, ew), but calling setWeight(v, u, ew) or increaseWeight(v, u, ew) will add the second half edge.
 * GraphBuilder allows you to be lazy and only add one half of each edge. Calling toGraph with autoCompleteEdges set to true will turn each half edge in GraphBuilder into one full edge in Graph.
 *
 * So far I haven't come up with a good parallelization for toGraph, so at some point I might omit the parallel parameter for toGraph.
 */
class GraphBuilder {
private:
	count n; //!< current number of nodes
	count selfloops; //!< number of self loops encountered so far

	std::string name; //!< name of the graph; if not set it will be G#ID

	bool weighted; //!< true if the graph will be weighted, false otherwise
	bool directed; //!< true if the graph will be directed, false otherwise

	std::vector< std::vector<node> > outEdges; //!< (outgoing) edges: for each edge (u, v), v is saved in outEdges[u], and for undirected graphs u is also saved in outEdges[v]
	std::vector< std::vector<edgeweight> > outEdgeWeights; //!< same schema (and same order!) as outEdges
	std::vector< std::vector<node> > inEdges; //!< only used for directed graphs: inEdges[v] contains all nodes u that have an edge (u, v)
	std::vector< std::vector<edgeweight> > inEdgeWeights; //!< only used for directed graphs, same schema as inEdges

	// internal lookup helpers (presumably return the position of v in the
	// respective adjacency vector of u — TODO confirm in the .cpp)
	index indexInOutEdgeArray(node u, node v) const;
	index indexInInEdgeArray(node u, node v) const;

public:
	/**
	 * Creates a new GraphBuilder. GraphBuilder supports the basic methods needed to create a new graph (addNode, addEdge, setWeight, increaseWeight). It is designed to be much faster for graph creation, but the speed comes with a restriction:
	 * For undirected graphs GraphBuilder will handle u->v and v->u as two different edges. Keep that in mind when using setWeight and increaseWeight.
	 * GraphBuilder allows parallelization in a special way. Its internal data structure saves edges only at the source node. As long as edges from node u are only added/changed by thread t1, every other thread can modify edges not starting in u.
	 * addNode is not threadsafe.
	 * @param n Number of nodes.
	 * @param weighted If set to <code>true</code>, the graph has edge weights.
	 * @param directed If set to @c true, the graph will be directed.
	 */
	GraphBuilder(count n = 0, bool weighted = false, bool directed = false);

	/**
	 * Reinitialize the builder for a fresh graph with @a n nodes.
	 */
	void reset(count n = 0);

	/**
	 * Set name of graph to @a name.
	 * @param name The name.
	 */
	void setName(std::string name) { this->name = name; }

	/**
	 * Returns <code>true</code> if this graph supports edge weights other than 1.0.
	 * @return <code>true</code> if this graph supports edge weights other than 1.0.
	 */
	inline bool isWeighted() const { return weighted; }

	/**
	 * Return <code>true</code> if this graph supports directed edges.
	 * @return <code>true</code> if this graph supports directed edges.
	 */
	inline bool isDirected() const { return directed; }

	/**
	 * Return <code>true</code> if graph contains no nodes.
	 * @return <code>true</code> if graph contains no nodes.
	 */
	inline bool isEmpty() const { return n == 0; }

	/**
	 * Return the number of nodes in the graph.
	 * @return The number of nodes.
	 */
	count numberOfNodes() const { return n; }

	/**
	 * Get an upper bound for the node ids in the graph.
	 * @return An upper bound for the node ids.
	 */
	index upperNodeIdBound() const { return n; }

	/**
	 * Add a new node to the graph and return it.
	 * @return The new node.
	 */
	node addNode();

	/**
	 * Insert one half of an edge between the nodes @a u and @a v (stored at
	 * the source node only; see the class comment). If the graph is weighted
	 * you can optionally set a weight for this edge. The default weight is 1.0.
	 * @param u Endpoint of edge.
	 * @param v Endpoint of edge.
	 * @param ew Optional edge weight.
	 */
	void addHalfEdge(node u, node v, edgeweight ew = defaultEdgeWeight) { addHalfOutEdge(u, v, ew); }
	void addHalfOutEdge(node u, node v, edgeweight ew = defaultEdgeWeight);
	void addHalfInEdge(node u, node v, edgeweight ew = defaultEdgeWeight);

	// swap u's whole neighborhood (and weights) with the given vectors;
	// `selfloop` indicates whether the neighborhood contains u itself
	void swapNeighborhood(node u, std::vector<node> &neighbours, std::vector<edgeweight> &weights, bool selfloop);

	/**
	 * Set the weight of an edge. If the edge does not exist,
	 * it will be inserted.
	 *
	 * @param[in]	u	endpoint of edge
	 * @param[in]	v	endpoint of edge
	 * @param[in]	ew	edge weight
	 */
	void setWeight(node u, node v, edgeweight ew) { setOutWeight(u, v, ew); }
	void setOutWeight(node u, node v, edgeweight ew);
	void setInWeight(node u, node v, edgeweight ew);

	/**
	 * Increase the weight of an edge. If the edge does not exist,
	 * it will be inserted.
	 *
	 * @param[in]	u	endpoint of edge
	 * @param[in]	v	endpoint of edge
	 * @param[in]	ew	edge weight
	 */
	void increaseWeight(node u, node v, edgeweight ew) { increaseOutWeight(u, v, ew); }
	void increaseOutWeight(node u, node v, edgeweight ew);
	void increaseInWeight(node u, node v, edgeweight ew);

	/**
	 * Generates a Graph instance. The graph builder will be reset at the end.
	 */
	Graph toGraph(bool autoCompleteEdges, bool parallel = false);

	/**
	 * Iterate over all nodes of the graph and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameter <code>(node)</code>.
	 */
	template<typename L> void forNodes(L handle) const;

	/**
	 * Iterate in parallel over all nodes of the graph and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameter <code>(node)</code>.
	 */
	template<typename L> void parallelForNodes(L handle) const;

	/**
	 * Iterate over all undirected pairs of nodes and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameters <code>(node, node)</code>.
	 */
	template<typename L> void forNodePairs(L handle) const;

	/**
	 * Iterate over all undirected pairs of nodes in parallel and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameters <code>(node, node)</code>.
	 */
	template<typename L> void parallelForNodePairs(L handle) const;

private:
	// toGraph() backends (selected by autoCompleteEdges/parallel)
	void toGraphDirectSwap(Graph &G);
	void toGraphSequential(Graph &G);
	void toGraphParallel(Graph &G);

	// append source's elements to target, then clear source
	template <typename T>
	static void copyAndClear(std::vector<T>& source, std::vector<T>& target);

	void setDegrees(Graph& G);
	count numberOfEdges(const Graph& G);
};
template<typename L>
void GraphBuilder::forNodes(L handle) const {
for (node v = 0; v < n; v++) {
handle(v);
}
}
template<typename L>
void GraphBuilder::parallelForNodes(L handle) const {
#pragma omp parallel for schedule(dynamic, 100)
for (node v = 0; v < n; v++) {
handle(v);
}
}
template<typename L>
void GraphBuilder::forNodePairs(L handle) const {
for (node u = 0; u < n; u++) {
for (node v = u + 1; v < n; v++) {
handle(u, v);
}
}
}
template<typename L>
void GraphBuilder::parallelForNodePairs(L handle) const {
#pragma omp parallel for schedule(dynamic, 100)
for (node u = 0; u < n; u++) {
for (node v = u + 1; v < n; v++) {
handle(u, v);
}
}
}
// Append all elements of @a source to @a target and empty @a source.
// Reserves the final size up front (avoids repeated reallocation) and moves
// the elements instead of copying them — safe because @a source is cleared
// immediately afterwards, so its moved-from contents are never read.
template <typename T>
void GraphBuilder::copyAndClear(std::vector<T>& source, std::vector<T>& target) {
	target.reserve(target.size() + source.size());
	std::move(source.begin(), source.end(), std::back_inserter(target));
	source.clear();
}
} /* namespace NetworKit */
#endif /* GRAPH_BUILDER_H */
|
Tanh.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/Tanh.c"
#else
// Forward pass: output = tanh(input), element-wise.
// `state` is unused; it is part of the uniform THNN calling convention.
void THNN_(Tanh_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output)
{
  THTensor_(tanh)(output, input);
}
// Backward pass: gradInput = gradOutput * (1 - output^2), element-wise,
// using the identity d/dx tanh(x) = 1 - tanh(x)^2 (output already holds
// tanh(x) from the forward pass).
void THNN_(Tanh_updateGradInput)(
          THNNState *state,
          THTensor *gradOutput,
          THTensor *gradInput,
          THTensor *output)
{
  THNN_CHECK_SHAPE(output, gradOutput);
  THTensor_(resizeAs)(gradInput, output);

  // Slow generic path: 1-D legacy tensors or any non-contiguous operand
  // go through the strided-iteration macro.
  if (THTensor_nDimensionLegacyAll(output) == 1 ||
      !THTensor_(isContiguous)(output) ||
      !THTensor_(isContiguous)(gradOutput) ||
      !THTensor_(isContiguous)(gradInput))
  {
    TH_TENSOR_APPLY3(scalar_t, gradInput, scalar_t, gradOutput, scalar_t, output,
      scalar_t z = *output_data; \
      *gradInput_data = *gradOutput_data * (1. - z*z);
    );
  }
  else
  {
    // Fast path: all tensors contiguous, so iterate flat arrays and let
    // OpenMP split the element range across threads.
    scalar_t* ptr_gradOutput = gradOutput->data<scalar_t>();
    scalar_t* ptr_gradInput  = gradInput->data<scalar_t>();
    scalar_t* ptr_output     = output->data<scalar_t>();
    int64_t i;

#pragma omp parallel for private(i)
    for (i = 0; i < THTensor_(nElement)(gradInput); i++)
    {
      scalar_t z = ptr_output[i];
      ptr_gradInput[i] = ptr_gradOutput[i] * (1. - z*z);
    }
  }
}
#endif
|
profile.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP RRRR OOO FFFFF IIIII L EEEEE %
% P P R R O O F I L E %
% PPPP RRRR O O FFF I L EEE %
% P R R O O F I L E %
% P R R OOO F IIIII LLLLL EEEEE %
% %
% %
% MagickCore Image Profile Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/linked-list.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/option-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/token.h"
#include "MagickCore/utility.h"
#if defined(MAGICKCORE_LCMS_DELEGATE)
#if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H)
#include <wchar.h>
#include <lcms/lcms2.h>
#else
#include <wchar.h>
#include "lcms2.h"
#endif
#endif
#if defined(MAGICKCORE_XML_DELEGATE)
# if defined(MAGICKCORE_WINDOWS_SUPPORT)
# if !defined(__MINGW32__)
# include <win32config.h>
# endif
# endif
# include <libxml/parser.h>
# include <libxml/tree.h>
#endif
/*
Forward declarations
*/
static MagickBooleanType
SetImageProfileInternal(Image *,const char *,const StringInfo *,
const MagickBooleanType,ExceptionInfo *);
static void
WriteTo8BimProfile(Image *,const char*,const StringInfo *);
/*
Typedef declarations
*/
struct _ProfileInfo
{
  char
    *name;       /* profile name (key under which it is stored) */

  size_t
    length;      /* byte count of *info */

  unsigned char
    *info;       /* profile payload bytes */

  size_t
    signature;   /* structure sanity marker (MagickCoreSignature) */
};

/* Context handed to lcms so its error callback can reach the image and
   exception sink of the transform in progress. */
typedef struct _CMSExceptionInfo
{
  Image
    *image;      /* image being color-managed; may be NULL */

  ExceptionInfo
    *exception;  /* where lcms warnings/errors are reported */
} CMSExceptionInfo;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageProfiles() clones one or more image profiles.
%
% The format of the CloneImageProfiles method is:
%
% MagickBooleanType CloneImageProfiles(Image *image,
% const Image *clone_image)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  /* validate both images before touching their profile maps */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->profiles != (void *) NULL)
    {
      /* replace any existing profiles wholesale with a deep copy of the
         source splay tree: keys cloned via ConstantString, values via
         CloneStringInfo */
      if (image->profiles != (void *) NULL)
        DestroyImageProfiles(image);
      image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
        (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e l e t e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DeleteImageProfile() deletes a profile from the image by its name.
%
% The format of the DeleteImageProfile method is:
%
% MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* nothing to delete if the image has no profile map at all */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /* NULL payload: keeps the 8BIM meta-profile consistent with the removal
     (presumably clears this name's entry there — see WriteTo8BimProfile) */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageProfiles() releases memory associated with an image profile map.
%
% The format of the DestroyProfiles method is:
%
% void DestroyImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  /* no-op when the image never acquired a profile map */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return;
  image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageProfile() gets a profile associated with an image by name.
%
% The format of the GetImageProfile method is:
%
% const StringInfo *GetImageProfile(const Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* no profile map at all => no profile of any name */
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* look the name up directly in the splay tree and return the match
     (or NULL when absent) without an intermediate local */
  return((const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t N e x t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetNextImageProfile() gets the next profile name for an image.
%
% The format of the GetNextImageProfile method is:
%
% char *GetNextImageProfile(const Image *image)
%
% A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((char *) NULL);
  /* advance the splay tree's internal iterator and return the next key
     (profile name), or NULL when exhausted */
  return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P r o f i l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
% profile with / to / from an image. If the profile is NULL, it is removed
% from the image otherwise added or applied. Use a name of '*' and a profile
% of NULL to remove all profiles from the image.
%
% ICC and ICM profiles are handled as follows: If the image does not have
% an associated color profile, the one you provide is associated with the
% image and the image pixels are not transformed. Otherwise, the colorspace
% transform defined by the existing and new profile are applied to the image
% pixels and the new profile is associated with the image.
%
% The format of the ProfileImage method is:
%
% MagickBooleanType ProfileImage(Image *image,const char *name,
% const void *datum,const size_t length,const MagickBooleanType clone)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
% o datum: the profile data.
%
% o length: the length of the profile.
%
% o clone: should be MagickFalse.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
/* Per-side (source or target) description of an lcms color transform. */
typedef struct _LCMSInfo
{
  ColorspaceType
    colorspace;            /* ImageMagick colorspace for this side */

  cmsUInt32Number
    type;                  /* lcms pixel-format descriptor */

  size_t
    channels;              /* channels packed per pixel (1, 3 or 4) */

  cmsHPROFILE
    profile;               /* lcms profile handle for this side */

  int
    intent;                /* rendering intent */

  double
    scale,                 /* linear map applied when packing/unpacking: */
    translate;             /* value*scale + translate */

  void
    **magick_restrict pixels;  /* per-thread scanline staging buffers */
} LCMSInfo;
#if LCMS_VERSION < 2060
/* Compatibility shims for lcms < 2.6, which lacks the context API: the
   "context" handle is simply the user-data pointer itself. */
static void* cmsGetContextUserData(cmsContext ContextID)
{
  return(ContextID);
}

static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData)
{
  magick_unreferenced(Plugin);
  return((cmsContext) UserData);
}

static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID),
  cmsLogErrorHandlerFunction Fn)
{
  magick_unreferenced(ContextID);
  /* pre-2.6 has only a global (non-per-context) error handler */
  cmsSetLogErrorHandler(Fn);
}

static void cmsDeleteContext(cmsContext magick_unused(ContextID))
{
  magick_unreferenced(ContextID);
}
#endif
/* Free a per-thread pixel-buffer array created by AcquirePixelThreadSet.
   Tolerates a partially filled set (NULL slots) so it can also clean up
   after a failed acquisition.  Always returns NULL.
   NOTE(review): assumes the ThreadResource limit has not changed since the
   buffers were acquired — confirm that invariant holds. */
static void **DestroyPixelThreadSet(void **pixels)
{
  register ssize_t
    i;

  if (pixels == (void **) NULL)
    return((void **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (void *) NULL)
      pixels[i]=RelinquishMagickMemory(pixels[i]);
  pixels=(void **) RelinquishMagickMemory(pixels);
  return(pixels);
}
/* Allocate one scanline staging buffer per worker thread: `columns` pixels
   of `channels` components each.  Component size is double when `highres`
   is set, Quantum otherwise.  Returns NULL on any allocation failure
   (already-allocated buffers are released via DestroyPixelThreadSet). */
static void **AcquirePixelThreadSet(const size_t columns,
  const size_t channels,MagickBooleanType highres)
{
  register ssize_t
    i;

  size_t
    number_threads;

  size_t
    size;

  void
    **pixels;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(void **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
  if (pixels == (void **) NULL)
    return((void **) NULL);
  /* zero the slot array so a partial failure can be cleaned up safely */
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  size=sizeof(double);
  if (highres == MagickFalse)
    size=sizeof(Quantum);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=AcquireQuantumMemory(columns,channels*size);
    if (pixels[i] == (void *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}
/* Delete every per-thread lcms transform handle and free the handle array.
   Tolerates NULL slots (partially built set).  Always returns NULL. */
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  register ssize_t
    i;

  assert(transform != (cmsHTRANSFORM *) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}
/* Create one lcms transform handle per worker thread (handles are not
   shareable across threads).  Returns NULL on any failure, releasing the
   handles created so far.
   NOTE(review): only target_info->intent is passed to lcms; the source
   intent field appears unused here — confirm that is intentional. */
static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info,
  const LCMSInfo *target_info,const cmsUInt32Number flags,
  cmsContext cms_context)
{
  cmsHTRANSFORM
    *transform;

  register ssize_t
    i;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  (void) memset(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile,
      source_info->type,target_info->profile,target_info->type,
      target_info->intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}
/* lcms error callback: routes lcms diagnostics into the ImageMagick
   exception associated with the transform (via the CMSExceptionInfo stored
   as the lcms context user data).  Degrades gracefully when the context,
   exception, or image pointer is missing. */
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  ExceptionInfo
    *exception;

  Image
    *image;

  cms_exception=(CMSExceptionInfo *) cmsGetContextUserData(context);
  if (cms_exception == (CMSExceptionInfo *) NULL)
    return;
  exception=cms_exception->exception;
  if (exception == (ExceptionInfo *) NULL)
    return;
  image=cms_exception->image;
  if (image == (Image *) NULL)
    {
      /* no image to attribute the problem to; report a generic warning */
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "UnableToTransformColorspace","`%s'","unknown context");
      return;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
      severity,message != (char *) NULL ? message : "no message");
  (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
    "UnableToTransformColorspace","`%s', %s (#%u)",image->filename,
    message != (char *) NULL ? message : "no message",severity);
}
/* Color-transform one image row (thread `id`) in double precision:
   pack the row's pixels into the thread's source staging buffer (scaled to
   the lcms input range), run cmsDoTransform, then unpack the target buffer
   back into the quantum row `q`.  Channel counts: 1 = gray, 3 = RGB,
   4 = CMYK-style (extra black channel). */
static void TransformDoublePixels(const int id,const Image* image,
  const LCMSInfo *source_info,const LCMSInfo *target_info,
  const cmsHTRANSFORM *transform,Quantum *q)
{
/* quantum -> lcms input range: value*scale*QuantumScale + translate */
#define GetLCMSPixel(source_info,pixel) \
  (source_info->scale*QuantumScale*(pixel)+source_info->translate)
/* lcms output -> quantum range, clamped */
#define SetLCMSPixel(target_info,pixel) \
  ClampToQuantum(target_info->scale*QuantumRange*(pixel)+target_info->translate)

  register double
    *p;

  register ssize_t
    x;

  /* pack: interleave the needed channels into the staging buffer */
  p=(double *) source_info->pixels[id];
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    *p++=GetLCMSPixel(source_info,GetPixelRed(image,q));
    if (source_info->channels > 1)
      {
        *p++=GetLCMSPixel(source_info,GetPixelGreen(image,q));
        *p++=GetLCMSPixel(source_info,GetPixelBlue(image,q));
      }
    if (source_info->channels > 3)
      *p++=GetLCMSPixel(source_info,GetPixelBlack(image,q));
    q+=GetPixelChannels(image);
  }
  cmsDoTransform(transform[id],source_info->pixels[id],
    target_info->pixels[id],(unsigned int) image->columns);
  /* unpack: rewind q to the row start and write the transformed values */
  p=(double *) target_info->pixels[id];
  q-=GetPixelChannels(image)*image->columns;
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (target_info->channels == 1)
      SetPixelGray(image,SetLCMSPixel(target_info,*p),q);
    else
      SetPixelRed(image,SetLCMSPixel(target_info,*p),q);
    p++;
    if (target_info->channels > 1)
      {
        SetPixelGreen(image,SetLCMSPixel(target_info,*p),q);
        p++;
        SetPixelBlue(image,SetLCMSPixel(target_info,*p),q);
        p++;
      }
    if (target_info->channels > 3)
      {
        SetPixelBlack(image,SetLCMSPixel(target_info,*p),q);
        p++;
      }
    q+=GetPixelChannels(image);
  }
}
static void TransformQuantumPixels(const int id,const Image* image,
  const LCMSInfo *source_info,const LCMSInfo *target_info,
  const cmsHTRANSFORM *transform,Quantum *q)
{
  register Quantum
    *pixel;

  register ssize_t
    column;

  /*
    Copy one scanline into this thread's Quantum staging buffer; no scaling
    is required because LCMS operates on the native integer quantum type.
  */
  pixel=(Quantum *) source_info->pixels[id];
  for (column=0; column < (ssize_t) image->columns; column++)
  {
    *pixel=GetPixelRed(image,q);
    pixel++;
    if (source_info->channels > 1)
      {
        *pixel=GetPixelGreen(image,q);
        pixel++;
        *pixel=GetPixelBlue(image,q);
        pixel++;
      }
    if (source_info->channels > 3)
      {
        *pixel=GetPixelBlack(image,q);
        pixel++;
      }
    q+=GetPixelChannels(image);
  }
  /*
    Convert the entire scanline with a single LCMS call.
  */
  cmsDoTransform(transform[id],source_info->pixels[id],
    target_info->pixels[id],(unsigned int) image->columns);
  /*
    Scatter the converted samples back into the pixel cache; q is rewound to
    the start of the row first.
  */
  pixel=(Quantum *) target_info->pixels[id];
  q-=GetPixelChannels(image)*image->columns;
  for (column=0; column < (ssize_t) image->columns; column++)
  {
    if (target_info->channels == 1)
      SetPixelGray(image,*pixel,q);
    else
      SetPixelRed(image,*pixel,q);
    pixel++;
    if (target_info->channels > 1)
      {
        SetPixelGreen(image,*pixel,q);
        pixel++;
        SetPixelBlue(image,*pixel,q);
        pixel++;
      }
    if (target_info->channels > 3)
      {
        SetPixelBlack(image,*pixel,q);
        pixel++;
      }
    q+=GetPixelChannels(image);
  }
}
#endif
/*
  SetsRGBImageProfile() attaches a built-in sRGB ICC profile ("argl", the
  public-domain sRGB profile by Graeme W. Gill) to the image when the image
  does not already carry an "icc" profile.

  Returns MagickTrue on success; MagickFalse if the image already has an
  "icc" profile (nothing is done) or if SetImageProfile() fails.
*/
static MagickBooleanType SetsRGBImageProfile(Image *image,
  ExceptionInfo *exception)
{
  /*
    The raw bytes of the sRGB IEC61966-2.1 ICC profile (equivalent to the
    1998 HP profile published at www.srgb.com).  Do not edit: the tag table
    offsets and the 'curv' TRC lookup table depend on exact byte layout.
  */
  static unsigned char
    sRGBProfile[] =
    {
      0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
      0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
      0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
      0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00,
      0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6,
      0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99,
      0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67,
      0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70,
      0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88,
      0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c,
      0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67,
      0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24,
      0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14,
      0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24,
      0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14,
      0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14,
      0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14,
      0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14,
      0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14,
      0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
      0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36,
      0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76,
      0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77,
      0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39,
      0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c,
      0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31,
      0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75,
      0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77,
      0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20,
      0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66,
      0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61,
      0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d,
      0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52,
      0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f,
      0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20,
      0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57,
      0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65,
      0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e,
      0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20,
      0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69,
      0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74,
      0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e,
      0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e,
      0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e,
      0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47,
      0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61,
      0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43,
      0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44,
      0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63,
      0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20,
      0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
      0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36,
      0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c,
      0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2,
      0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01,
      0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d,
      0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00,
      0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
      0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0,
      0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87,
      0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4,
      0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00,
      0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19,
      0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37,
      0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54,
      0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72,
      0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90,
      0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae,
      0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb,
      0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb,
      0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d,
      0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32,
      0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59,
      0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83,
      0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1,
      0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1,
      0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14,
      0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b,
      0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84,
      0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1,
      0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00,
      0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43,
      0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a,
      0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3,
      0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20,
      0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71,
      0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4,
      0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c,
      0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77,
      0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5,
      0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37,
      0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d,
      0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07,
      0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74,
      0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5,
      0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a,
      0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2,
      0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f,
      0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf,
      0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54,
      0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc,
      0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69,
      0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9,
      0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e,
      0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26,
      0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3,
      0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 0x49, 0x0e, 0x64,
      0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09,
      0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3,
      0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61,
      0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13,
      0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9,
      0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84,
      0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43,
      0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06,
      0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce,
      0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b,
      0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c,
      0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41,
      0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b,
      0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa,
      0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd,
      0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5,
      0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2,
      0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3,
      0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99,
      0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94,
      0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94,
      0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98,
      0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1,
      0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf,
      0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2,
      0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda,
      0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7,
      0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18,
      0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f,
      0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b,
      0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b,
      0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1,
      0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c,
      0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c,
      0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91,
      0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb,
      0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a,
      0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f,
      0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8,
      0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37,
      0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c,
      0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05,
      0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74,
      0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8,
      0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61,
      0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0,
      0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64,
      0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee,
      0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d,
      0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12,
      0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab,
      0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b,
      0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0,
      0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a,
      0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a,
      0x4d, 0x93, 0x4d, 0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00,
      0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb,
      0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c,
      0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42,
      0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f,
      0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0,
      0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8,
      0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95,
      0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78,
      0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61,
      0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f,
      0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43,
      0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d,
      0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d,
      0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43,
      0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f,
      0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60,
      0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78,
      0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95,
      0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8,
      0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1,
      0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11,
      0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46,
      0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81,
      0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2,
      0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a,
      0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57,
      0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab,
      0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04,
      0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64,
      0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca,
      0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36,
      0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8,
      0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20,
      0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f,
      0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24,
      0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf,
      0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40,
      0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8,
      0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76,
      0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a,
      0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4,
      0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75,
      0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d,
      0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea,
      0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae,
      0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79,
      0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a,
      0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21,
      0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff,
      0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3,
      0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce,
      0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf,
      0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7,
      0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5,
      0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 0x39, 0xd0, 0xba,
      0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6,
      0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8,
      0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1,
      0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10,
      0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36,
      0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63,
      0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96,
      0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0,
      0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11,
      0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58,
      0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7,
      0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb,
      0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57,
      0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba,
      0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff
    };

  StringInfo
    *profile;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /* never overwrite an existing ICC profile */
  if (GetImageProfile(image,"icc") != (const StringInfo *) NULL)
    return(MagickFalse);
  /* SetStringInfoDatum() copies the bytes, so the static array stays intact */
  profile=AcquireStringInfo(sizeof(sRGBProfile));
  SetStringInfoDatum(profile,sRGBProfile);
  status=SetImageProfile(image,"icc",profile,exception);
  profile=DestroyStringInfo(profile);
  return(status);
}
/*
  ProfileImage() associates, applies, or deletes a profile.

    o name:  the profile name, e.g. "icc", "icm", "iptc", or "*" (all).
    o datum/length:  the raw profile bytes; a NULL datum or zero length
      deletes the profile(s) matching name instead of adding one.

  For ICC/ICM profiles the pixels are color-transformed with LCMS when the
  image already carries an ICC profile (existing profile = source, new
  profile = target); otherwise the profile is simply attached.  Returns
  MagickTrue on success.

  Fix: in the target cmsSigXYZData case the 16-bit branch assigned
  source_info.type instead of target_info.type, leaving target_info.type
  uninitialized at MAGICKCORE_QUANTUM_DEPTH==16 with highres disabled.
*/
MagickExport MagickBooleanType ProfileImage(Image *image,const char *name,
  const void *datum,const size_t length,ExceptionInfo *exception)
{
#define ProfileImageTag "Profile/Image"
#ifndef TYPE_XYZ_8
  #define TYPE_XYZ_8 (COLORSPACE_SH(PT_XYZ)|CHANNELS_SH(3)|BYTES_SH(1))
#endif
#define ThrowProfileException(severity,tag,context) \
{ \
  if (profile != (StringInfo *) NULL) \
    profile=DestroyStringInfo(profile); \
  if (cms_context != (cmsContext) NULL) \
    cmsDeleteContext(cms_context); \
  if (source_info.profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(source_info.profile); \
  if (target_info.profile != (cmsHPROFILE) NULL) \
    (void) cmsCloseProfile(target_info.profile); \
  ThrowBinaryException(severity,tag,context); \
}

  MagickBooleanType
    status;

  StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(name != (const char *) NULL);
  if ((datum == (const void *) NULL) || (length == 0))
    {
      char
        *next;

      /*
        Delete image profile(s) whose name matches the (possibly globbed)
        name argument; the iterator is reset after each deletion because
        DeleteImageProfile() invalidates it.
      */
      ResetImageProfileIterator(image);
      for (next=GetNextImageProfile(image); next != (const char *) NULL; )
      {
        if (IsOptionMember(next,name) != MagickFalse)
          {
            (void) DeleteImageProfile(image,next);
            ResetImageProfileIterator(image);
          }
        next=GetNextImageProfile(image);
      }
      return(MagickTrue);
    }
  /*
    Add a ICC, IPTC, or generic profile to the image.
  */
  status=MagickTrue;
  profile=AcquireStringInfo((size_t) length);
  SetStringInfoDatum(profile,(unsigned char *) datum);
  if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
    status=SetImageProfile(image,name,profile,exception);
  else
    {
      const StringInfo
        *icc_profile;

      icc_profile=GetImageProfile(image,"icc");
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          const char
            *value;

          /*
            The incoming profile is identical to the attached one: the EXIF
            metadata may still indicate the image is not sRGB, in which case
            a built-in sRGB profile is substituted.
          */
          value=GetImageProperty(image,"exif:ColorSpace",exception);
          (void) value;
          if (LocaleCompare(value,"1") != 0)
            (void) SetsRGBImageProfile(image,exception);
          value=GetImageProperty(image,"exif:InteroperabilityIndex",exception);
          if (LocaleCompare(value,"R98.") != 0)
            (void) SetsRGBImageProfile(image,exception);
          icc_profile=GetImageProfile(image,"icc");
        }
      if ((icc_profile != (const StringInfo *) NULL) &&
          (CompareStringInfo(icc_profile,profile) == 0))
        {
          /* same profile already attached; nothing to do */
          profile=DestroyStringInfo(profile);
          return(MagickTrue);
        }
#if !defined(MAGICKCORE_LCMS_DELEGATE)
      (void) ThrowMagickException(exception,GetMagickModule(),
        MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn",
        "'%s' (LCMS)",image->filename);
#else
      {
        cmsContext
          cms_context;

        CMSExceptionInfo
          cms_exception;

        LCMSInfo
          source_info,
          target_info;

        /*
          Transform pixel colors as defined by the color profiles.
        */
        cms_exception.image=image;
        cms_exception.exception=exception;
        cms_context=cmsCreateContext(NULL,&cms_exception);
        if (cms_context == (cmsContext) NULL)
          ThrowBinaryException(ResourceLimitError,
            "ColorspaceColorProfileMismatch",name);
        cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler);
        source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
          GetStringInfoDatum(profile),(cmsUInt32Number)
          GetStringInfoLength(profile));
        if (source_info.profile == (cmsHPROFILE) NULL)
          {
            cmsDeleteContext(cms_context);
            ThrowBinaryException(ResourceLimitError,
              "ColorspaceColorProfileMismatch",name);
          }
        if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) &&
            (icc_profile == (StringInfo *) NULL))
          status=SetImageProfile(image,name,profile,exception);
        else
          {
            CacheView
              *image_view;

            cmsColorSpaceSignature
              signature;

            cmsHTRANSFORM
              *magick_restrict transform;

            cmsUInt32Number
              flags;

#if !defined(MAGICKCORE_HDRI_SUPPORT)
            const char
              *artifact;
#endif

            MagickBooleanType
              highres;

            MagickOffsetType
              progress;

            ssize_t
              y;

            /*
              If an ICC profile is already attached it becomes the source of
              the transform and the new profile becomes the target.
            */
            target_info.profile=(cmsHPROFILE) NULL;
            if (icc_profile != (StringInfo *) NULL)
              {
                target_info.profile=source_info.profile;
                source_info.profile=cmsOpenProfileFromMemTHR(cms_context,
                  GetStringInfoDatum(icc_profile),
                  (cmsUInt32Number) GetStringInfoLength(icc_profile));
                if (source_info.profile == (cmsHPROFILE) NULL)
                  ThrowProfileException(ResourceLimitError,
                    "ColorspaceColorProfileMismatch",name);
              }
            highres=MagickTrue;
#if !defined(MAGICKCORE_HDRI_SUPPORT)
            artifact=GetImageArtifact(image,"profile:highres-transform");
            if (IsStringFalse(artifact) != MagickFalse)
              highres=MagickFalse;
#endif
            if (highres != MagickFalse)
              {
                source_info.scale=1.0;
                source_info.translate=0.0;
              }
            /*
              Configure the source pixel layout from the source profile's
              colorspace; the scale/translate pair maps Quantum values into
              the range LCMS expects for that space.
            */
            source_info.colorspace=sRGBColorspace;
            source_info.channels=3;
            switch (cmsGetColorSpace(source_info.profile))
            {
              case cmsSigCmykData:
              {
                source_info.colorspace=CMYKColorspace;
                source_info.channels=4;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_CMYK_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_CMYK_16;
                else
#endif
                  {
                    source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
                    source_info.scale=100.0;
                  }
                break;
              }
              case cmsSigGrayData:
              {
                source_info.colorspace=GRAYColorspace;
                source_info.channels=1;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_GRAY_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_GRAY_16;
                else
#endif
                  source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
                break;
              }
              case cmsSigLabData:
              {
                source_info.colorspace=LabColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_Lab_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_Lab_16;
                else
#endif
                  {
                    source_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
                    source_info.scale=100.0;
                    source_info.translate=(-0.5);
                  }
                break;
              }
              case cmsSigRgbData:
              {
                source_info.colorspace=sRGBColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_RGB_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_RGB_16;
                else
#endif
                  source_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
              case cmsSigXYZData:
              {
                source_info.colorspace=XYZColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_XYZ_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  source_info.type=(cmsUInt32Number) TYPE_XYZ_16;
                else
#endif
                  source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
                break;
              }
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            /*
              Configure the target pixel layout: with no target profile the
              device-link's PCS is used, otherwise the target profile's
              colorspace.
            */
            signature=cmsGetPCS(source_info.profile);
            if (target_info.profile != (cmsHPROFILE) NULL)
              signature=cmsGetColorSpace(target_info.profile);
            if (highres != MagickFalse)
              {
                target_info.scale=1.0;
                target_info.translate=0.0;
              }
            target_info.channels=3;
            switch (signature)
            {
              case cmsSigCmykData:
              {
                target_info.colorspace=CMYKColorspace;
                target_info.channels=4;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_CMYK_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_CMYK_16;
                else
#endif
                  {
                    target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL;
                    target_info.scale=0.01;
                  }
                break;
              }
              case cmsSigGrayData:
              {
                target_info.colorspace=GRAYColorspace;
                target_info.channels=1;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_GRAY_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_GRAY_16;
                else
#endif
                  target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL;
                break;
              }
              case cmsSigLabData:
              {
                target_info.colorspace=LabColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_Lab_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_Lab_16;
                else
#endif
                  {
                    target_info.type=(cmsUInt32Number) TYPE_Lab_DBL;
                    target_info.scale=0.01;
                    target_info.translate=0.5;
                  }
                break;
              }
              case cmsSigRgbData:
              {
                target_info.colorspace=sRGBColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_RGB_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_RGB_16;
                else
#endif
                  target_info.type=(cmsUInt32Number) TYPE_RGB_DBL;
                break;
              }
              case cmsSigXYZData:
              {
                target_info.colorspace=XYZColorspace;
#if (MAGICKCORE_QUANTUM_DEPTH == 8)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_XYZ_8;
                else
#elif (MAGICKCORE_QUANTUM_DEPTH == 16)
                if (highres == MagickFalse)
                  target_info.type=(cmsUInt32Number) TYPE_XYZ_16;  /* was source_info.type: typo left target_info.type uninitialized */
                else
#endif
                  target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL;
                break;
              }
              default:
                ThrowProfileException(ImageError,
                  "ColorspaceColorProfileMismatch",name);
            }
            switch (image->rendering_intent)
            {
              case AbsoluteIntent:
              {
                target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC;
                break;
              }
              case PerceptualIntent:
              {
                target_info.intent=INTENT_PERCEPTUAL;
                break;
              }
              case RelativeIntent:
              {
                target_info.intent=INTENT_RELATIVE_COLORIMETRIC;
                break;
              }
              case SaturationIntent:
              {
                target_info.intent=INTENT_SATURATION;
                break;
              }
              default:
              {
                target_info.intent=INTENT_PERCEPTUAL;
                break;
              }
            }
            flags=cmsFLAGS_HIGHRESPRECALC;
#if defined(cmsFLAGS_BLACKPOINTCOMPENSATION)
            if (image->black_point_compensation != MagickFalse)
              flags|=cmsFLAGS_BLACKPOINTCOMPENSATION;
#endif
            transform=AcquireTransformThreadSet(&source_info,&target_info,
              flags,cms_context);
            if (transform == (cmsHTRANSFORM *) NULL)
              ThrowProfileException(ImageError,"UnableToCreateColorTransform",
                name);
            /*
              Transform image as dictated by the source & target image profiles.
            */
            source_info.pixels=AcquirePixelThreadSet(image->columns,
              source_info.channels,highres);
            target_info.pixels=AcquirePixelThreadSet(image->columns,
              target_info.channels,highres);
            if ((source_info.pixels == (void **) NULL) ||
                (target_info.pixels == (void **) NULL))
              {
                target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
                source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
                transform=DestroyTransformThreadSet(transform);
                ThrowProfileException(ResourceLimitError,
                  "MemoryAllocationFailed",image->filename);
              }
            if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
              {
                target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
                source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
                transform=DestroyTransformThreadSet(transform);
                if (source_info.profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(source_info.profile);
                if (target_info.profile != (cmsHPROFILE) NULL)
                  (void) cmsCloseProfile(target_info.profile);
                return(MagickFalse);
              }
            if (target_info.colorspace == CMYKColorspace)
              (void) SetImageColorspace(image,target_info.colorspace,exception);
            progress=0;
            image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp parallel for schedule(static) shared(status) \
              magick_number_threads(image,image,image->rows,1)
#endif
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              const int
                id = GetOpenMPThreadId();

              MagickBooleanType
                sync;

              register Quantum
                *magick_restrict q;

              if (status == MagickFalse)
                continue;
              q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
                exception);
              if (q == (Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              if (highres != MagickFalse)
                TransformDoublePixels(id,image,&source_info,&target_info,transform,q);
              else
                TransformQuantumPixels(id,image,&source_info,&target_info,transform,q);
              sync=SyncCacheViewAuthenticPixels(image_view,exception);
              if (sync == MagickFalse)
                status=MagickFalse;
              if (image->progress_monitor != (MagickProgressMonitor) NULL)
                {
                  MagickBooleanType
                    proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
                  #pragma omp atomic
#endif
                  progress++;
                  proceed=SetImageProgress(image,ProfileImageTag,progress,
                    image->rows);
                  if (proceed == MagickFalse)
                    status=MagickFalse;
                }
            }
            image_view=DestroyCacheView(image_view);
            (void) SetImageColorspace(image,target_info.colorspace,exception);
            switch (signature)
            {
              case cmsSigRgbData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  TrueColorType : TrueColorAlphaType;
                break;
              }
              case cmsSigCmykData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  ColorSeparationType : ColorSeparationAlphaType;
                break;
              }
              case cmsSigGrayData:
              {
                image->type=image->alpha_trait == UndefinedPixelTrait ?
                  GrayscaleType : GrayscaleAlphaType;
                break;
              }
              default:
                break;
            }
            target_info.pixels=DestroyPixelThreadSet(target_info.pixels);
            source_info.pixels=DestroyPixelThreadSet(source_info.pixels);
            transform=DestroyTransformThreadSet(transform);
            if ((status != MagickFalse) &&
                (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass))
              status=SetImageProfile(image,name,profile,exception);
            if (target_info.profile != (cmsHPROFILE) NULL)
              (void) cmsCloseProfile(target_info.profile);
          }
        (void) cmsCloseProfile(source_info.profile);
        cmsDeleteContext(cms_context);
      }
#endif
    }
  profile=DestroyStringInfo(profile);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e m o v e I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RemoveImageProfile() removes a named profile from the image and returns its
% value.
%
% The format of the RemoveImageProfile method is:
%
% void *RemoveImageProfile(Image *image,const char *name)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name.
%
*/
/*
  Detach the named profile from the image's profile splay-tree and return it
  to the caller, who assumes ownership.  Returns NULL when the image carries
  no profiles at all.  Any matching 8BIM sub-resource is removed first so the
  "8bim" profile stays consistent.
*/
MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  /* A NULL payload tells WriteTo8BimProfile to delete the sub-resource. */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return((StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *)
    image->profiles,name));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t P r o f i l e I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImageProfileIterator() resets the image profile iterator. Use it in
% conjunction with GetNextImageProfile() to iterate over all the profiles
% associated with an image.
%
% The format of the ResetImageProfileIterator method is:
%
% ResetImageProfileIterator(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Rewind the profile iterator so a subsequent GetNextImageProfile() starts
  from the first profile again.  A profile-less image is a no-op.
*/
MagickExport void ResetImageProfileIterator(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles != (SplayTreeInfo *) NULL)
    ResetSplayTreeIterator((SplayTreeInfo *) image->profiles);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e P r o f i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageProfile() adds a named profile to the image. If a profile with the
% same name already exists, it is replaced. This method differs from the
% ProfileImage() method in that it does not apply CMS color profiles.
%
% The format of the SetImageProfile method is:
%
% MagickBooleanType SetImageProfile(Image *image,const char *name,
% const StringInfo *profile)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o name: the profile name, for example icc, exif, and 8bim (8bim is the
% Photoshop wrapper for iptc profiles).
%
% o profile: A StringInfo structure that contains the named profile.
%
*/
static void *DestroyProfile(void *profile)
{
return((void *) DestroyStringInfo((StringInfo *) profile));
}
/*
  Read one byte from a resource block into *quantum; return the advanced
  read cursor.
*/
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=p[0];
  return(p+1);
}
/*
  Read a 32-bit big-endian (network order) value from a resource block into
  *quantum; return the advanced read cursor.
*/
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  int
    i;

  unsigned int
    value;

  value=0;
  for (i=0; i < 4; i++)
    value=(value << 8) | (unsigned int) p[i];
  *quantum=value;
  return(p+4);
}
/*
  Read a 16-bit big-endian value from a resource block into *quantum; return
  the advanced read cursor.
*/
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) ((p[0] << 8) | p[1]);
  return(p+2);
}
/*
  Store a 32-bit value at p in big-endian (network) byte order.
*/
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  p[0]=(unsigned char) (quantum >> 24);
  p[1]=(unsigned char) (quantum >> 16);
  p[2]=(unsigned char) (quantum >> 8);
  p[3]=(unsigned char) quantum;
}
/*
  WriteTo8BimProfile() mirrors a named profile (icc, iptc, or xmp) into the
  image's "8bim" Photoshop resource block.  When profile is NULL the matching
  resource is removed from the block; otherwise its payload is replaced.
  Names with no 8BIM resource id, and images without an "8bim" profile, are
  ignored.  The rebuilt block replaces the old one in the profile tree.
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  register const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  /* Map the profile name onto its Photoshop resource id. */
  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  for (p=datum; p < (datum+length-16); )
  {
    q=p;  /* q marks the start of this resource record */
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    /* Skip the pascal-string resource name, padded to an even total. */
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    /* Payload size; payloads are stored padded to an even byte count. */
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((count & 0x01) != 0)
      count++;
    if ((count < 0) || (p > (datum+length-count)) || (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        extract_extent=0;
        extent=(datum+length)-(p+count);  /* bytes after this resource */
        if (profile == (StringInfo *) NULL)
          {
            /* Remove: copy everything before the record header (offset) and,
               below, everything after the payload. */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /* Replace: keep the header up to the 4-byte length word (which
               sits just before the payload), rewrite that word with the new
               payload size, then splice in the new payload (even-padded). */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        /* Append the untouched trailing resources and install the result. */
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}
/*
  GetProfilesFromResourceBlock() walks a Photoshop "8BIM" resource block and
  registers every embedded profile it recognizes (iptc, icc, exif, xmp) on
  the image; the resolution resource (0x03ED) updates image->resolution and
  image->units directly.  Registration passes recursive=MagickTrue so the
  extracted sub-profiles are not mirrored back into the 8BIM block.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block,ExceptionInfo *exception)
{
  const unsigned char
    *datum;

  register const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    /* Skip the pascal-string resource name, padded to an even total. */
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((p > (datum+length-count)) || (count > (ssize_t) length) || (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution.
        */
        if (count < 10)
          break;
        /* NOTE(review): unlike the other cases, p advances by the 16 bytes
           read below rather than by count; a resource with count != 16 would
           leave p mid-payload — confirm this is intended upstream. */
        p=ReadResourceLong(p,&resolution);
        image->resolution.x=((double) resolution)/65536.0;
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->resolution.y=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->resolution.x/=2.54;
            image->resolution.y/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    /* Payloads are padded to an even byte count; skip the pad byte. */
    if ((count & 0x01) != 0)
      p++;
  }
}
#if defined(MAGICKCORE_XML_DELEGATE)
/*
  ValidateXMPProfile() returns MagickTrue when the profile payload parses as
  well-formed XML (libxml2, with parser errors and warnings suppressed),
  MagickFalse otherwise.
*/
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  xmlDocPtr
    document;

  /*
    Parse XML profile.
  */
  document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int)
    GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR |
    XML_PARSE_NOWARNING);
  if (document == (xmlDocPtr) NULL)
    return(MagickFalse);
  xmlFreeDoc(document);
  return(MagickTrue);
}
#else
/*
  Without an XML delegate every XMP profile is reported as invalid; the
  caller then emits a warning and continues without attaching the profile.
*/
static MagickBooleanType ValidateXMPProfile(const StringInfo *profile)
{
  return(MagickFalse);
}
#endif
/*
  SetImageProfileInternal() attaches a clone of the named profile to the
  image (creating the profile tree on first use; the key is stored in lower
  case).  An XMP payload that is not well-formed XML is rejected with a
  warning and MagickTrue is returned so processing continues without it.
  On success the profile is cross-linked: an "8bim" profile is unpacked into
  its embedded sub-profiles, while any other name is mirrored into the 8BIM
  resource block unless this call is itself a recursive unpacking step.
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive,
  ExceptionInfo *exception)
{
  char
    lowercase_name[MagickPathExtent];

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (LocaleCompare(name,"xmp") == 0)
    if (ValidateXMPProfile(profile) == MagickFalse)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
          "CorruptImageProfile","`%s'",name);
        return(MagickTrue);
      }
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
      DestroyProfile);
  (void) CopyMagickString(lowercase_name,name,MagickPathExtent);
  LocaleLower(lowercase_name);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(lowercase_name),CloneStringInfo(profile));
  if (status == MagickFalse)
    return(status);
  if (LocaleCompare(name,"8bim") == 0)
    GetProfilesFromResourceBlock(image,profile,exception);
  else
    if (recursive == MagickFalse)
      WriteTo8BimProfile(image,name,profile);
  return(status);
}
/*
  Public entry point: forwards to SetImageProfileInternal() with
  recursive=MagickFalse so the matching 8BIM sub-resource is kept in sync.
*/
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile,ExceptionInfo *exception)
{
  return(SetImageProfileInternal(image,name,profile,MagickFalse,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e P r o f i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageProfiles() synchronizes image properties with the image profiles.
% Currently we only support updating the EXIF resolution and orientation.
%
% The format of the SyncImageProfiles method is:
%
% MagickBooleanType SyncImageProfiles(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Consume one byte from the buffer tracked by (*p,*length), advancing the
  cursor; returns EOF when the buffer is exhausted (cursor untouched).
*/
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    byte;

  if (*length == 0)
    return(EOF);
  byte=(int) (**p);
  (*p)++;
  (*length)--;
  return(byte);
}
/*
  Decode a 16-bit value from buffer in the given byte order and reinterpret
  the bit pattern as signed (via the union) without relying on
  implementation-defined narrowing.
*/
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned short
    value;

  if (endian == LSBEndian)
    value=(unsigned short) (((unsigned short) buffer[1] << 8) |
      (unsigned short) buffer[0]);
  else
    value=(unsigned short) (((unsigned short) buffer[0] << 8) |
      (unsigned short) buffer[1]);
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}
/*
  Decode a 32-bit value from buffer in the given byte order; the union
  reinterprets the unsigned bit pattern as signed.
*/
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    value;

  if (endian == LSBEndian)
    value=((unsigned int) buffer[3] << 24) | ((unsigned int) buffer[2] << 16) |
      ((unsigned int) buffer[1] << 8) | (unsigned int) buffer[0];
  else
    value=((unsigned int) buffer[0] << 24) | ((unsigned int) buffer[1] << 16) |
      ((unsigned int) buffer[2] << 8) | (unsigned int) buffer[3];
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}
/*
  Read a big-endian 32-bit value from the tracked buffer, advancing the
  cursor; returns 0 (cursor untouched) when fewer than 4 bytes remain.
*/
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    result;

  if (*length < 4)
    return(0);
  result=ReadProfileLong(MSBEndian,*p);
  *p+=4;
  (*length)-=4;
  return(result);
}
/*
  Read a big-endian 16-bit value from the tracked buffer, advancing the
  cursor; returns 0 (cursor untouched) when fewer than 2 bytes remain.
*/
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    result;

  if (*length < 2)
    return(0);
  result=ReadProfileShort(MSBEndian,*p);
  *p+=2;
  (*length)-=2;
  return(result);
}
/*
  Store the low 32 bits of value at p in the requested byte order.
*/
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 24);
      buffer[1]=(unsigned char) (value >> 16);
      buffer[2]=(unsigned char) (value >> 8);
      buffer[3]=(unsigned char) value;
    }
  (void) memcpy(p,buffer,4);
}
/*
  Store a 16-bit value at p in the requested byte order.
*/
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
    }
  else
    {
      buffer[0]=(unsigned char) (value >> 8);
      buffer[1]=(unsigned char) value;
    }
  (void) memcpy(p,buffer,2);
}
/*
  Sync8BimProfile() scans an in-memory 8BIM (Photoshop resource block)
  profile for the resolution resource (id 0x03ED, 16-byte payload) and
  overwrites it in place with the image's current x/y resolution and units.
  Returns MagickFalse when the block is truncated or inconsistent,
  MagickTrue otherwise.
*/
static MagickBooleanType Sync8BimProfile(Image *image,StringInfo *profile)
{
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    /* Resynchronize on the "8BIM" signature one byte at a time. */
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
      return(MagickFalse);
    id=ReadProfileMSBShort(&p,&length);
    /* Pascal-string resource name: one length byte plus the name bytes. */
    count=(ssize_t) ReadProfileByte(&p,&length);
    if ((count >= (ssize_t) length) || (count < 0))
      return(MagickFalse);
    p+=count;
    length-=count;
    /* NOTE(review): this pad test inspects the next data byte's low bit;
       the resource readers elsewhere pad on the parity of the name length
       (((length_byte+1) & 0x01)) — confirm this condition is intended. */
    if ((*p & 0x01) == 0)
      (void) ReadProfileByte(&p,&length);
    count=(ssize_t) ReadProfileMSBLong(&p,&length);
    if ((count > (ssize_t) length) || (count < 0))
      return(MagickFalse);
    if ((id == 0x3ED) && (count == 16))
      {
        /* Payload layout: x-res (16.16 fixed point), x-unit, y-res, y-unit;
           a per-centimeter resolution is converted via the 2.54 factor. */
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*2.54*
            65536.0),p);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.x*
            65536.0),p);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*2.54*
            65536.0),p+8);
        else
          WriteProfileLong(MSBEndian,(unsigned int) (image->resolution.y*
            65536.0),p+8);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
      }
    p+=count;
    length-=count;
  }
  return(MagickTrue);
}
/*
  SyncExifProfile() updates an in-memory EXIF profile in place so its
  resolution (0x011A/0x011B), resolution unit (0x0128), and orientation
  (0x0112) entries match the image's current attributes.  The IFD tree is
  walked iteratively with an explicit directory stack; a splay tree of
  visited entry addresses guards against offset cycles in corrupt files.
  Returns MagickFalse when no TIFF header can be located or the data is
  truncated.
*/
MagickBooleanType SyncExifProfile(Image *image,StringInfo *profile)
{
#define MaxDirectoryStack 16
#define EXIF_DELIMITER "\n"
#define EXIF_NUM_FORMATS 12
#define TAG_EXIF_OFFSET 0x8769
#define TAG_INTEROP_OFFSET 0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    length,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  unsigned char
    *directory,
    *exif;

  /*
    Set EXIF resolution tag.
  */
  length=GetStringInfoLength(profile);
  exif=GetStringInfoDatum(profile);
  if (length < 16)
    return(MagickFalse);
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  if ((id != 0x4949) && (id != 0x4D4D))
    {
      /*
        No TIFF byte-order mark up front: scan for an "Exif\0\0" preamble
        (as found in a JPEG APP1 payload) and restart from just past it.
      */
      while (length != 0)
      {
        if (ReadProfileByte(&exif,&length) != 0x45)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x78)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x69)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  /* 0x4949 ("II") = little endian, 0x4D4D ("MM") = big endian. */
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;
  else
    if (id == 0x4D4D)
      endian=MSBEndian;
    else
      return(MagickFalse);
  /* TIFF magic number 42 follows the byte-order mark. */
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  /* Tracks visited entry addresses to break offset cycles in corrupt data. */
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    /* Resume a directory pushed by an EXIF/interop sub-IFD offset. */
    if (level > 0)
      {
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      register unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      /* Each IFD entry is 12 bytes: tag, format, count, then inline value
         or offset. */
      q=(unsigned char *) (directory+2+(12*entry));
      if (q > (exif+length-12))
        break;  /* corrupt EXIF */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break;  /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value fits inline in the entry */
      else
        {
          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ReadProfileLong(endian,q+8);
          if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
            continue;
          if (~length < number_bytes)
            continue;  /* prevent overflow */
          p=(unsigned char *) (exif+offset);
        }
      /* Rewrite the entries that mirror image attributes in place. */
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution: when 8 bytes it is a rational; force denominator 1. */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution. */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit: EXIF values are image->units offset by one. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) (image->units+1),p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          /*
            Push the remainder of this IFD, the sub-IFD it points at, and the
            chained IFD whose offset follows the entry table onto the stack.
          */
          offset=(ssize_t) ReadProfileLong(endian,p);
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}
/*
  Synchronize image attributes into the attached profiles: the 8BIM block's
  resolution resource and the EXIF resolution/orientation/unit entries.
  Returns MagickFalse when either profile could not be synchronized.
*/
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if ((profile != (StringInfo *) NULL) &&
      (Sync8BimProfile(image,profile) == MagickFalse))
    status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if ((profile != (StringInfo *) NULL) &&
      (SyncExifProfile(image,profile) == MagickFalse))
    status=MagickFalse;
  return(status);
}
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color-private.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/quantum.h"
#include "magick/resource_.h"
#include "magick/thread-private.h"
#include "magick/transform.h"
/*
Define declarations.
*/
/* Modulation factors (0-255 char scale mapped onto the Quantum range) used
   by FrameImage() to derive the 3-D bevel shades from the matte color. */
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: Define the width and height of the border.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Surround the image with a flat border of image->border_color.  The border
  thickness on each side comes from border_info; the work is delegated to
  FrameImage() with both bevels set to zero.  Returns the new image, or NULL
  on failure.
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,ExceptionInfo *exception)
{
  FrameInfo
    frame_info;

  Image
    *canvas,
    *framed;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  /* Describe a bevel-free frame: border thickness on every side. */
  frame_info.x=(ssize_t) border_info->width;
  frame_info.y=(ssize_t) border_info->height;
  frame_info.width=image->columns+(border_info->width << 1);
  frame_info.height=image->rows+(border_info->height << 1);
  frame_info.inner_bevel=0;
  frame_info.outer_bevel=0;
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  /* FrameImage() paints with matte_color; borrow the border color for it. */
  canvas->matte_color=image->border_color;
  framed=FrameImage(canvas,&frame_info,exception);
  canvas=DestroyImage(canvas);
  if (framed != (Image *) NULL)
    framed->matte_color=image->matte_color;
  return(framed);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"
CacheView
*image_view,
*frame_view;
Image
*frame_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickPixelPacket
accentuate,
border,
highlight,
matte,
shadow,
trough;
register ssize_t
x;
size_t
bevel_width,
height,
width;
ssize_t
y;
/*
Check frame geometry.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(frame_info != (FrameInfo *) NULL);
if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
x=(ssize_t) frame_info->width-frame_info->x-bevel_width;
y=(ssize_t) frame_info->height-frame_info->y-bevel_width;
if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows))
ThrowImageException(OptionError,"FrameIsLessThanImageSize");
/*
Initialize framed image attributes.
*/
frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
exception);
if (frame_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(frame_image,DirectClass) == MagickFalse)
{
InheritException(exception,&frame_image->exception);
frame_image=DestroyImage(frame_image);
return((Image *) NULL);
}
if ((IsPixelGray(&frame_image->border_color) == MagickFalse) &&
(IsGrayColorspace(frame_image->colorspace) != MagickFalse))
(void) SetImageColorspace(frame_image,sRGBColorspace);
if ((frame_image->border_color.opacity != OpaqueOpacity) &&
(frame_image->matte == MagickFalse))
(void) SetImageAlphaChannel(frame_image,OpaqueAlphaChannel);
frame_image->page=image->page;
if ((image->page.width != 0) && (image->page.height != 0))
{
frame_image->page.width+=frame_image->columns-image->columns;
frame_image->page.height+=frame_image->rows-image->rows;
}
/*
Initialize 3D effects color.
*/
GetMagickPixelPacket(frame_image,&matte);
matte.colorspace=sRGBColorspace;
SetMagickPixelPacket(frame_image,&image->matte_color,(IndexPacket *) NULL,
&matte);
GetMagickPixelPacket(frame_image,&border);
border.colorspace=sRGBColorspace;
SetMagickPixelPacket(frame_image,&image->border_color,(IndexPacket *) NULL,
&border);
GetMagickPixelPacket(frame_image,&accentuate);
accentuate.red=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
accentuate.green=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
accentuate.blue=(MagickRealType) (QuantumScale*((QuantumRange-
AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
accentuate.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&highlight);
highlight.red=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
highlight.green=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
highlight.blue=(MagickRealType) (QuantumScale*((QuantumRange-
HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
highlight.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&shadow);
shadow.red=QuantumScale*matte.red*ShadowModulate;
shadow.green=QuantumScale*matte.green*ShadowModulate;
shadow.blue=QuantumScale*matte.blue*ShadowModulate;
shadow.opacity=matte.opacity;
GetMagickPixelPacket(frame_image,&trough);
trough.red=QuantumScale*matte.red*TroughModulate;
trough.green=QuantumScale*matte.green*TroughModulate;
trough.blue=QuantumScale*matte.blue*TroughModulate;
trough.opacity=matte.opacity;
if (image->colorspace == CMYKColorspace)
{
ConvertRGBToCMYK(&matte);
ConvertRGBToCMYK(&border);
ConvertRGBToCMYK(&accentuate);
ConvertRGBToCMYK(&highlight);
ConvertRGBToCMYK(&shadow);
ConvertRGBToCMYK(&trough);
}
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
frame_view=AcquireAuthenticCacheView(frame_image,exception);
height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (height != 0)
{
register IndexPacket
*magick_restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Draw top of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
height,exception);
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
if (q != (PixelPacket *) NULL)
{
/*
Draw top of ornamental border.
*/
for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
{
for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
{
if (x < y)
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
else
SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
width=image->columns+((size_t) frame_info->inner_bevel << 1)-
y;
for (x=0; x < (ssize_t) width; x++)
{
if (x < y)
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
else
SetPixelPacket(frame_image,&trough,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
/*
Draw sides of ornamental border.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,frame_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register IndexPacket
*magick_restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Initialize scanline with matte color.
*/
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
frame_image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
/*
Set frame interior pixels.
*/
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelPacket(frame_image,&border,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FrameImage)
#endif
proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
height=(size_t) (frame_info->inner_bevel+frame_info->height-
frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
if (height != 0)
{
register IndexPacket
*magick_restrict frame_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
/*
Draw bottom of ornamental border.
*/
q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
height),frame_image->columns,height,exception);
if (q != (PixelPacket *) NULL)
{
/*
Draw bottom of ornamental border.
*/
frame_indexes=GetCacheViewAuthenticIndexQueue(frame_view);
for (y=frame_info->inner_bevel-1; y >= 0; y--)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < y; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
{
if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
else
SetPixelPacket(frame_image,&accentuate,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_info->width-frame_info->x-image->columns-bevel_width;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
height=frame_info->height-frame_info->y-image->rows-bevel_width;
for (y=0; y < (ssize_t) height; y++)
{
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
width=frame_image->columns-2*frame_info->outer_bevel;
for (x=0; x < (ssize_t) width; x++)
{
SetPixelPacket(frame_image,&matte,q,frame_indexes);
q++;
frame_indexes++;
}
for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
{
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
q++;
frame_indexes++;
}
}
for (y=frame_info->outer_bevel-1; y >= 0; y--)
{
for (x=0; x < y; x++)
{
SetPixelPacket(frame_image,&highlight,q,frame_indexes);
q++;
frame_indexes++;
}
for ( ; x < (ssize_t) frame_image->columns; x++)
{
if (x >= (ssize_t) (frame_image->columns-y))
SetPixelPacket(frame_image,&shadow,q,frame_indexes);
else
SetPixelPacket(frame_image,&trough,q,frame_indexes);
q++;
frame_indexes++;
}
}
(void) SyncCacheViewAuthenticPixels(frame_view,exception);
}
}
frame_view=DestroyCacheView(frame_view);
image_view=DestroyCacheView(image_view);
x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
frame_info->inner_bevel);
y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
frame_info->inner_bevel);
if (status != MagickFalse)
status=CompositeImage(frame_image,image->compose,image,x,y);
if (status == MagickFalse)
frame_image=DestroyImage(frame_image);
return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
*/
/*
  RaiseImage() simulates a 3-D button by brightening the top/left edges of
  the image toward `foreground` and darkening the bottom/right edges toward
  `background` (the roles swap when raise == MagickFalse, giving a sunken
  look).  The image is processed in three horizontal bands: top bevel,
  middle rows (only the left/right bevels are touched), and bottom bevel.
  Returns MagickTrue on success, MagickFalse if pixels cannot be accessed.
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise)
{
/* Blend weights: new = old*Factor + edge_color*(QuantumRange-Factor), scaled. */
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)
  CacheView
    *image_view;
  ExceptionInfo
    *exception;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  Quantum
    foreground,
    background;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  /* Bevels on opposite sides must not meet or overlap. */
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  /* raise: light top/left, dark bottom/right; lowered effect swaps them. */
  foreground=QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=QuantumRange;
    }
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.
  */
  status=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,raise_info->height,1)
#endif
  /* Top bevel band: left highlight wedge, accentuated top edge, right
     shadow wedge.  Wedge widths grow/shrink with y to form the miter. */
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    register ssize_t
      x;
    register PixelPacket
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < y; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*AccentuateFactor+(MagickRealType) foreground*
        (QuantumRange-AccentuateFactor))));
      q++;
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows-2*raise_info->height,1)
#endif
  /* Middle band: highlight the left bevel, skip the untouched interior,
     shadow the right bevel. */
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    register ssize_t
      x;
    register PixelPacket
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    /* Interior pixels are left unchanged; just advance the pointer. */
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q++;
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows-raise_info->height,1)
#endif
  /* Bottom bevel band: left highlight wedge (width = rows-y, the distance
     from the bottom edge), troughed bottom edge, right shadow wedge. */
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;
    register PixelPacket
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*HighlightFactor+(MagickRealType) foreground*
        (QuantumRange-HighlightFactor))));
      q++;
    }
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*TroughFactor+(MagickRealType) background*
        (QuantumRange-TroughFactor))));
      q++;
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      SetPixelRed(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelRed(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelGreen(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelGreen(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      SetPixelBlue(q,ClampToQuantum(QuantumScale*((MagickRealType)
        GetPixelBlue(q)*ShadowFactor+(MagickRealType) background*
        (QuantumRange-ShadowFactor))));
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
effects.c | #include <omp.h>
#include <stdlib.h>
#include <string.h>

#include "effects.h"
#include "log.h"
// Horizontal box blur: each dest pixel is the average of the 2*radius+1
// source pixels centred on it within the same row (a sliding-window sum,
// O(width) per row regardless of radius).  Pixels are 0xRRGGBB; edge
// windows keep the fixed weight, so borders darken slightly.
static void blur_h(uint32_t *dest, uint32_t *src, int width, int height,
		int radius) {
	const double weight = 1.0 / (radius * 2 + 1);
#pragma omp parallel for
	for (int row = 0; row < height; ++row) {
		const int base = row * width;
		double red = 0.0, green = 0.0, blue = 0.0;
		// Warm the window up starting radius pixels left of the row.
		for (int col = -radius; col < width; ++col) {
			const int leaving = col - radius - 1;
			if (leaving >= 0) {
				const uint32_t px = src[base + leaving];
				red -= weight * ((px >> 16) & 0xff);
				green -= weight * ((px >> 8) & 0xff);
				blue -= weight * (px & 0xff);
			}
			const int entering = col + radius;
			if (entering < width) {
				const uint32_t px = src[base + entering];
				red += weight * ((px >> 16) & 0xff);
				green += weight * ((px >> 8) & 0xff);
				blue += weight * (px & 0xff);
			}
			if (col >= 0) {
				dest[base + col] =
					(((uint32_t)(red + 0.5) & 0xff) << 16) |
					(((uint32_t)(green + 0.5) & 0xff) << 8) |
					((uint32_t)(blue + 0.5) & 0xff);
			}
		}
	}
}
// Vertical box blur: the column-wise counterpart of blur_h.  A sliding
// window of 2*radius+1 pixels moves down each column, adding the pixel
// entering the window and subtracting the one leaving it.
static void blur_v(uint32_t *dest, uint32_t *src, int width, int height,
		int radius) {
	const double weight = 1.0 / (radius * 2 + 1);
#pragma omp parallel for
	for (int col = 0; col < width; ++col) {
		double red = 0.0, green = 0.0, blue = 0.0;
		for (int row = -radius; row < height; ++row) {
			const int leaving = row - radius - 1;
			if (leaving >= 0) {
				const uint32_t px = src[leaving * width + col];
				red -= weight * ((px >> 16) & 0xff);
				green -= weight * ((px >> 8) & 0xff);
				blue -= weight * (px & 0xff);
			}
			const int entering = row + radius;
			if (entering < height) {
				const uint32_t px = src[entering * width + col];
				red += weight * ((px >> 16) & 0xff);
				green += weight * ((px >> 8) & 0xff);
				blue += weight * (px & 0xff);
			}
			if (row >= 0) {
				dest[row * width + col] =
					(((uint32_t)(red + 0.5) & 0xff) << 16) |
					(((uint32_t)(green + 0.5) & 0xff) << 8) |
					((uint32_t)(blue + 0.5) & 0xff);
			}
		}
	}
}
// One complete box-blur pass: horizontal sweep from src into the scratch
// buffer, then vertical sweep from scratch into dest.  All three buffers
// hold width*height pixels; src is not modified.
static void blur_once(uint32_t *dest, uint32_t *src, uint32_t *scratch,
		int width, int height, int radius) {
	blur_h(scratch, src, width, height, radius);
	blur_v(dest, scratch, width, height, radius);
}
// This effect_blur function, and the associated blur_* functions,
// are my own adaptations of code in yvbbrjdr's i3lock-fancy-rapid:
// https://github.com/yvbbrjdr/i3lock-fancy-rapid
//
// Blurs `src` into `dest` with `times` box-blur passes of the given
// radius.  NOTE: `src` is clobbered when times > 1 (it is reused as a
// ping-pong buffer).  If the scratch allocation fails, dest is left
// unmodified and the effect is silently skipped.
static void effect_blur(uint32_t *dest, uint32_t *src, int width, int height,
		int radius, int times) {
	uint32_t *origdest = dest;
	uint32_t *scratch = malloc(width * height * sizeof(*scratch));
	if (scratch == NULL)
		return; /* out of memory; skip the effect rather than crash */
	blur_once(dest, src, scratch, width, height, radius);
	for (int i = 0; i < times - 1; ++i) {
		/* Ping-pong between the two buffers for the remaining passes. */
		uint32_t *tmp = src;
		src = dest;
		dest = tmp;
		blur_once(dest, src, scratch, width, height, radius);
	}
	free(scratch);
	// We're flipping between using dest and src;
	// if the last buffer we used was src, copy that over to dest.
	if (dest != origdest)
		memcpy(origdest, dest, width * height * sizeof(*dest));
}
// Nearest-neighbour rescale of an swidth x sheight pixel buffer by
// `scale` into dest (dimensions truncate to int).  Each destination
// pixel samples the source at the inverse-scaled coordinate; samples
// falling outside the source are skipped (left untouched in dest).
static void effect_scale(uint32_t *dest, uint32_t *src, int swidth, int sheight,
		double scale) {
	const int dwidth = swidth * scale;
	const int dheight = sheight * scale;
	const double inv = 1.0 / scale;
#pragma omp parallel for
	for (int row = 0; row < dheight; ++row) {
		const int srow = row * inv;
		if (srow >= sheight)
			continue;
		for (int col = 0; col < dwidth; ++col) {
			const int scol = col * inv;
			if (scol < swidth)
				dest[row * dwidth + col] = src[srow * swidth + scol];
		}
	}
}
// Converts the buffer to greyscale in place using the BT.601 luma
// weights (0.2989 R + 0.5870 G + 0.1140 B, truncated to int), writing
// the luma into all three channels of each 0xRRGGBB pixel.
static void effect_greyscale(uint32_t *data, int width, int height) {
#pragma omp parallel for
	for (int row = 0; row < height; ++row) {
		for (int col = 0; col < width; ++col) {
			const int idx = row * width + col;
			const uint32_t px = data[idx];
			const int red = (px >> 16) & 0xff;
			const int green = (px >> 8) & 0xff;
			const int blue = px & 0xff;
			int luma = 0.2989 * red + 0.5870 * green + 0.1140 * blue;
			// Clamp defensively; weights sum to < 1 so this is a no-op
			// for valid 8-bit channels.
			if (luma < 0)
				luma = 0;
			else if (luma > 255)
				luma = 255;
			data[idx] = ((uint32_t)luma << 16) | ((uint32_t)luma << 8) |
				(uint32_t)luma;
		}
	}
}
/*
 * Applies `count` effects to `surface` in order and returns the resulting
 * surface.  Blur and scale each render into a freshly allocated RGB24
 * surface and destroy the previous one (so the caller's original surface
 * pointer is consumed); greyscale mutates the surface in place.  If a
 * replacement surface cannot be created, that effect is logged and
 * skipped, and the current surface is kept.
 */
cairo_surface_t *swaylock_effects_run(cairo_surface_t *surface,
		struct swaylock_effect *effects, int count) {
	for (int i = 0; i < count; ++i) {
		struct swaylock_effect *effect = &effects[i];
		switch (effect->tag) {
		case EFFECT_BLUR: {
			/* Same dimensions as the input; blur writes into the new surface. */
			cairo_surface_t *surf = cairo_image_surface_create(
					CAIRO_FORMAT_RGB24,
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface));
			if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) {
				swaylock_log(LOG_ERROR, "Failed to create surface for blur effect");
				cairo_surface_destroy(surf);
				break;
			}
			/* NOTE(review): effect_blur may scribble on the source buffer
			 * (ping-pong); the source surface is destroyed right after, so
			 * that is harmless here. */
			effect_blur(
					(uint32_t *)cairo_image_surface_get_data(surf),
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface),
					effect->e.blur.radius, effect->e.blur.times);
			cairo_surface_flush(surf);
			cairo_surface_destroy(surface);
			surface = surf;
			break;
		}
		case EFFECT_SCALE: {
			/* New surface sized by the scale factor (truncated to int). */
			cairo_surface_t *surf = cairo_image_surface_create(
					CAIRO_FORMAT_RGB24,
					cairo_image_surface_get_width(surface) * effect->e.scale,
					cairo_image_surface_get_height(surface) * effect->e.scale);
			if (cairo_surface_status(surf) != CAIRO_STATUS_SUCCESS) {
				swaylock_log(LOG_ERROR, "Failed to create surface for scale effect");
				cairo_surface_destroy(surf);
				break;
			}
			effect_scale(
					(uint32_t *)cairo_image_surface_get_data(surf),
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface),
					effect->e.scale);
			cairo_surface_flush(surf);
			cairo_surface_destroy(surface);
			surface = surf;
			break;
		}
		case EFFECT_GREYSCALE: {
			/* In-place; no replacement surface needed. */
			effect_greyscale(
					(uint32_t *)cairo_image_surface_get_data(surface),
					cairo_image_surface_get_width(surface),
					cairo_image_surface_get_height(surface));
			cairo_surface_flush(surface);
			break;
		}
		}
	}
	return surface;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Compute *result = *x - *y for `struct timeval' values.
 *
 * y is normalized in place as a side effect (same as the original).
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
	/* Borrow whole seconds into y's microseconds so x->tv_usec >= y->tv_usec
	 * after this step. */
	if (x->tv_usec < y->tv_usec) {
		int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * carry;
		y->tv_sec += carry;
	}
	/* Normalize in the other direction when the microsecond gap exceeds
	 * one second. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}
	/* tv_usec is now guaranteed non-negative. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;
	return x->tv_sec < y->tv_sec;
}
/*
 * Driver for the order-1 3D 7-point variable-coefficient stencil.
 * Usage: prog Nx Ny Nz Nt
 * Fixes over the original: `min(...)` used an undefined lowercase macro
 * (only MIN is defined); Nt was left uninitialized when argc == 4; the
 * index-0 planes of A and coef were read by the stencil (i-1/j-1/k-1
 * reach index 0) but never initialized.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  /* All four sizes are required. */
  if (argc < 5) {
    fprintf(stderr, "usage: %s <Nx> <Ny> <Nz> <Nt>\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+2;
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  // allocate the arrays (two time planes)
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // one coefficient array per stencil point
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 8;
  tile_size[3] = 64;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  // initialize variables -- cover index 0 too, since the stencil reads
  // the i-1/j-1/k-1 neighbors down to index 0
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        /* Keep the boundary of the second plane consistent with the first;
         * its interior is overwritten by the first sweep. */
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* The original called the undefined lowercase `min` macro here. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
DynamicGraph.h | /*
open source routing machine
Copyright (C) Dennis Luxen, others 2010
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU AFFERO General Public License as published by
the Free Software Foundation; either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
or see http://www.gnu.org/licenses/agpl.txt.
*/
#ifndef DYNAMICGRAPH_H_INCLUDED
#define DYNAMICGRAPH_H_INCLUDED
#include <algorithm>
#include <cassert>
#include <limits>
#include <vector>
// Adjacency-array graph that supports dynamic edge insertion/removal.
// Each node owns a contiguous slice of m_edges; deleted or reserved slots
// are marked as "dummy" edges (target == max unsigned) so they can be
// reused by later insertions without moving other nodes' slices.
template< typename EdgeData>
class DynamicGraph {
    public:
        typedef unsigned NodeIterator;
        typedef unsigned EdgeIterator;

        // Edge as supplied to the constructor; ordered by (source, target)
        // so a sorted vector groups all edges of a node together.
        class InputEdge {
            public:
                NodeIterator source;
                NodeIterator target;
                EdgeData data;
                bool operator<( const InputEdge& right ) const {
                    if ( source != right.source )
                        return source < right.source;
                    return target < right.target;
                }
        };

        // Builds the adjacency array from `graph`, which MUST be sorted by
        // source node (the scan below relies on it); edges of each node are
        // stored contiguously.  ~20% extra capacity is reserved for growth.
        DynamicGraph( int nodes, const std::vector< InputEdge > &graph )
        {
            m_numNodes = nodes;
            m_numEdges = ( EdgeIterator ) graph.size();
            m_nodes.reserve( m_numNodes );
            m_nodes.resize( m_numNodes );
            // First pass: count each node's edges and assign slice offsets.
            EdgeIterator edge = 0;
            EdgeIterator position = 0;
            for ( NodeIterator node = 0; node < m_numNodes; ++node ) {
                EdgeIterator lastEdge = edge;
                while ( edge < m_numEdges && graph[edge].source == node ) {
                    ++edge;
                }
                m_nodes[node].firstEdge = position;
                m_nodes[node].edges = edge - lastEdge;
                position += m_nodes[node].edges;
            }
            m_edges.reserve( position * 1.2 );
            m_edges.resize( position );
            // Second pass: copy targets and payloads into the slices.
            edge = 0;
            for ( NodeIterator node = 0; node < m_numNodes; ++node ) {
                for ( EdgeIterator i = m_nodes[node].firstEdge, e = m_nodes[node].firstEdge + m_nodes[node].edges; i != e; ++i ) {
                    m_edges[i].target = graph[edge].target;
                    m_edges[i].data = graph[edge].data;
                    // NOTE(review): requires EdgeData to expose `distance`;
                    // a non-positive distance is considered invalid input.
                    assert(graph[edge].data.distance > 0);
                    edge++;
                }
            }
        }

        ~DynamicGraph()
        {
        }

        unsigned GetNumberOfNodes() const
        {
            return m_numNodes;
        }

        unsigned GetNumberOfEdges() const
        {
            return m_numEdges;
        }

        unsigned GetOutDegree( const NodeIterator &n ) const
        {
            return m_nodes[n].edges;
        }

        NodeIterator GetTarget( const EdgeIterator &e ) const
        {
            return NodeIterator( m_edges[e].target );
        }

        EdgeData &GetEdgeData( const EdgeIterator &e )
        {
            return m_edges[e].data;
        }

        const EdgeData &GetEdgeData( const EdgeIterator &e ) const
        {
            return m_edges[e].data;
        }

        EdgeIterator BeginEdges( const NodeIterator &n ) const
        {
            //assert( EndEdges( n ) - EdgeIterator( _nodes[n].firstEdge ) <= 100 );
            return EdgeIterator( m_nodes[n].firstEdge );
        }

        EdgeIterator EndEdges( const NodeIterator &n ) const
        {
            return EdgeIterator( m_nodes[n].firstEdge + m_nodes[n].edges );
        }

        //adds an edge. Invalidates edge iterators for the source node
        // The new edge goes into the slot just past the node's slice.  If
        // that slot is taken, either grow into a dummy slot to the left or
        // relocate the whole slice to freshly reserved space at the end,
        // dummying out the old slots for reuse.
        EdgeIterator InsertEdge( const NodeIterator &from, const NodeIterator &to, const EdgeData &data )
        {
            Node &node = m_nodes[from];
            EdgeIterator newFirstEdge = node.edges + node.firstEdge;
            if ( newFirstEdge >= m_edges.size() || !isDummy( newFirstEdge ) ) {
                if ( node.firstEdge != 0 && isDummy( node.firstEdge - 1 ) ) {
                    // Reuse the dummy slot directly left of the slice: shift
                    // the slice start left and move the first edge there.
                    node.firstEdge--;
                    m_edges[node.firstEdge] = m_edges[node.firstEdge + node.edges];
                } else {
                    // Relocate: append a larger slice at the end of m_edges.
                    EdgeIterator newFirstEdge = ( EdgeIterator ) m_edges.size();
                    unsigned newSize = node.edges * 1.2 + 2;
                    EdgeIterator requiredCapacity = newSize + m_edges.size();
                    EdgeIterator oldCapacity = m_edges.capacity();
                    if ( requiredCapacity >= oldCapacity ) {
                        m_edges.reserve( requiredCapacity * 1.1 );
                    }
                    m_edges.resize( m_edges.size() + newSize );
                    for ( EdgeIterator i = 0; i < node.edges; ++i ) {
                        m_edges[newFirstEdge + i ] = m_edges[node.firstEdge + i];
                        makeDummy( node.firstEdge + i );
                    }
                    // Slot node.edges is left non-dummy: it receives the new
                    // edge below; the rest of the slack becomes dummies.
                    for ( EdgeIterator i = node.edges + 1; i < newSize; i++ )
                        makeDummy( newFirstEdge + i );
                    node.firstEdge = newFirstEdge;
                }
            }
            Edge &edge = m_edges[node.firstEdge + node.edges];
            edge.target = to;
            edge.data = data;
            m_numEdges++;
            node.edges++;
            // NOTE(review): this returns firstEdge + edges, which is one
            // PAST the newly inserted edge (it sits at firstEdge+edges-1)
            // -- confirm callers expect this before changing it.
            return EdgeIterator( node.firstEdge + node.edges );
        }

        //removes an edge. Invalidates edge iterators for the source node
        void DeleteEdge( const NodeIterator source, const EdgeIterator &e ) {
            Node &node = m_nodes[source];
            --m_numEdges;
            --node.edges;
            const unsigned last = node.firstEdge + node.edges;
            //swap with last edge
            m_edges[e] = m_edges[last];
            makeDummy( last );
        }

        //removes all edges (source,target)
        int DeleteEdgesTo( const NodeIterator source, const NodeIterator target )
        {
            int deleted = 0;
            // Compact matching edges out by swapping in edges from the tail
            // of the slice; the freed tail slots become dummies.
            for ( EdgeIterator i = BeginEdges( source ), iend = EndEdges( source ); i < iend - deleted; ++i ) {
                if ( m_edges[i].target == target ) {
                    do {
                        deleted++;
                        m_edges[i] = m_edges[iend - deleted];
                        makeDummy( iend - deleted );
                    } while ( i < iend - deleted && m_edges[i].target == target );
                }
            }

            // NOTE(review): only the m_numEdges update is atomic; the node's
            // edge count and the swaps above are not synchronized -- verify
            // the intended concurrency model with callers.
            #pragma omp atomic
            m_numEdges -= deleted;
            m_nodes[source].edges -= deleted;

            return deleted;
        }

        //searches for a specific edge
        // Linear scan over the node's slice; returns EndEdges(from) if no
        // edge to `to` exists.
        EdgeIterator FindEdge( const NodeIterator &from, const NodeIterator &to ) const
        {
            for ( EdgeIterator i = BeginEdges( from ), iend = EndEdges( from ); i != iend; ++i ) {
                if ( m_edges[i].target == to ) {
                    return i;
                }
            }
            return EndEdges( from );
        }

    protected:
        // A dummy edge is a free slot, encoded as target == max unsigned.
        bool isDummy( EdgeIterator edge ) const
        {
            return m_edges[edge].target == std::numeric_limits< NodeIterator >::max();
        }

        void makeDummy( EdgeIterator edge )
        {
            m_edges[edge].target = std::numeric_limits< NodeIterator >::max();
        }

        struct Node {
            //index of the first edge
            EdgeIterator firstEdge;
            //amount of edges
            unsigned edges;
        };

        struct Edge {
            NodeIterator target;
            EdgeData data;
        };

        NodeIterator m_numNodes;
        EdgeIterator m_numEdges;
        std::vector< Node > m_nodes;
        std::vector< Edge > m_edges;
};
#endif // DYNAMICGRAPH_H_INCLUDED
|
power.c | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include "nb/memory_bot.h"
#include "nb/solver_bot/vector.h"
#include "nb/solver_bot/sparse/sparse.h"
#include "nb/solver_bot/sparse/eigen/power.h"
#include "../sparse_struct.h"
#define POW2(a) ((a)*(a))
/*
 * Computes the h dominant eigenpairs of sparse matrix A with the power
 * method plus deflation: each new iterate is re-orthogonalized against
 * the previously found eigenvectors (steps 2), so successive runs
 * converge to successively smaller eigenvalues.
 */
void nb_sparse_eigen_power(const nb_sparse_t* const A, int h,
			   double **_eigenvecs,/* Out */
			   double *_eigenvals, /* Out */
			   int *it,            /* Out */
			   double tolerance,
			   uint32_t omp_parallel_threads){
	/* The program must receive all the pointers allocated, where
	 * > A is a nb_sparse_t matrix
	 * > _eigenvecs is an array of size h to store h eigenvectors.
	 * > _eigenvals is an array of size h to store the h greatest
	 * eigenvalues approximated.
	 * > h is the number of eigenvalues to be computed.
	 * > '*it' will store (after computation) the iterations needed
	 * to compute each eigenvalue (is a return value).
	 */
	/* Declare structures and variables to be used */
	uint32_t i, j, c, d; /* Iterative variables */
	double pnorm, rnorm2;
	/* Allocate memory for structures */
	double *p = nb_allocate_zero_mem(A->N * sizeof(double));
	/* Deflation power method */
	/* NOTE(review): i is uint32_t while h is int; a negative h would wrap
	 * to a huge loop bound -- presumably callers pass h >= 1. */
	for (i = 0; i < h; i++) {
		it[i] = 0;
		rnorm2 = 1;
		/* Initialize q0 such that ||qk||=1 */
		/* Seed with the unit vector e1; p is primed with A's first
		 * column entries (rows whose first stored index is column 0),
		 * i.e. p = A*e1. */
		_eigenvecs[i][0] = 1;
		for (j = 1; j < A->N; j++)
			_eigenvecs[i][j] = 0;
		for (c = 0; c < A->N; c++) {
			p[c] = 0;
			if(A->rows_index[c][0] == 0)
				p[c] = A->rows_values[c][0];
		}
		/* Start loop */
		/* Iterate until the squared residual ||p - lambda*q||^2 drops
		 * below tolerance^2. */
		while (rnorm2 > POW2(tolerance)) {
			/* Step 1 */
			/* Normalize: q = p / ||p||. */
			pnorm = nb_vector_get_norm(p, A->N);
			for (c = 0; c < A->N; c++)
				_eigenvecs[i][c] = p[c]/pnorm;
			/* Step 2 */
			/* Deflate: remove components along previously computed
			 * eigenvectors (Gram-Schmidt), parallelized per vector. */
			for (j = 0; j < i; j++) {
				double alpha = 0;
#pragma omp parallel for reduction(+:alpha) num_threads(omp_parallel_threads) schedule(guided) private(c)
				for(c=0; c < A->N; c++)
					alpha += _eigenvecs[i][c]*_eigenvecs[j][c];
#pragma omp parallel for num_threads(omp_parallel_threads) private(c)
				for(c=0; c < A->N; c++)
					_eigenvecs[i][c] -= alpha*_eigenvecs[j][c];
			}
			/* Step 3 */
			/* Paralelize the operation pk = A*qk */
#pragma omp parallel for schedule(guided) num_threads(omp_parallel_threads) private(c, d)
			for(c=0; c < A->N; c++){
				p[c] = 0;
				for(d=0; d < A->rows_size[c]; d++)
					p[c] += A->rows_values[c][d]
						*_eigenvecs[i][A->rows_index[c][d]];
			}
			/* Step 4 */
			/* Rayleigh quotient: lambda = q.(A q). */
			double lambda = 0;
			for(c=0; c < A->N; c++)
				lambda += _eigenvecs[i][c]*p[c];
			_eigenvals[i] = lambda;
			/* Step 5 and 6 */
			/* Squared residual of the eigen equation for convergence. */
			rnorm2 = 0;
			for(c=0; c < A->N; c++)
				rnorm2 += POW2(p[c]-lambda*_eigenvecs[i][c]);
			it[i]++;
		}
	}
	/* Free memory */
	nb_free_mem(p);
}
|
pvm-OpenMP-columnas.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
// Multiply an N x N matrix by a vector, timing the OpenMP loop.
// Usage: ./prog N
// Fixes: argc is now checked BEFORE argv[1] is dereferenced, and the inner
// product uses a reduction (previously every thread executed `v2[i] +=`
// concurrently on the same element — a data race).
int main(int argc, char **argv) {
    if (argc < 2) {
        fprintf(stderr, "Faltan argumentos\n");
        exit(-1);
    }
    int N = atoi(argv[1]);
    int i, j;
    int m[N][N];            // VLAs: a large N can overflow the stack
    int v1[N], v2[N];
    double start, end, elapsed;
    // Inicializamos
    for (i = 0; i < N; i++) {
        v1[i] = i;
        v2[i] = 0;
        for (j = 0; j < N; j++)
            m[i][j] = i + j;
    }
    start = omp_get_wtime();
    // Multiplicamos: each row's dot product is parallelized with a
    // reduction into a private accumulator.
    for (i = 0; i < N; ++i) {
        int suma = 0;
        #pragma omp parallel for reduction(+:suma)
        for (j = 0; j < N; ++j)
            suma += m[i][j] * v1[j];
        v2[i] = suma;
    }
    end = omp_get_wtime();
    elapsed = end - start;
    // Imprimimos
    printf("Vector Resultante\n");
    printf("v2[%d] = %d\n", 0, v2[0]);
    printf("v2[%d] = %d\n", N-1, v2[N-1]);
    printf("Tiempo(seg.):%11.9f\t / Tamaño Vectores:%u\n", elapsed, N);
    return 0;
}
|
accessibility.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <assert.h>
#include <stdint.h>
#include <error.h>
#include <limits.h>
#include <time.h>
#include <pthread.h>
#include "libxml/parser.h"
#include "libxml/tree.h"
#include "libxml/HTMLparser.h"
#include "queue.c"
#include <omp.h>
int IMAGE_COUNT = 0;
int ALT_COUNT = 0;
int NCORES = -1;
// Print every attribute (name, value) pair of one DOM node to stdout.
// NOTE(review): xmlGetProp() returns a newly allocated copy of the value
// that is never xmlFree()'d here, so one allocation leaks per attribute.
void print_properties(xmlNode *node) {
xmlAttr *property = node->properties;
while (property != NULL) {
const xmlChar *name = property->name;
xmlChar *value = xmlGetProp(node, name);
printf("PropName: %s %s\n", name, value);
property = property->next;
}
}
// Count a node plus all of its following siblings.
// :input node (xmlNode*) - head of a sibling list (may be NULL)
// :output int - number of nodes in the list
int get_child_length(xmlNode *node) {
    int count = 0;
    for (xmlNode *walk = node; walk != NULL; walk = walk->next) {
        count++;
    }
    return count;
}
// Copy a sibling list into a freshly malloc'd array of node pointers.
// :input node (xmlNode*) - head of the sibling list
// :input size (int)     - element count (as reported by get_child_length)
// :output xmlNode**     - heap array of length `size`; caller owns it
xmlNode **convert_to_array(xmlNode *node, int size) {
    xmlNode **array = (xmlNode**)malloc(size * sizeof(xmlNode*));
    int slot = 0;
    for (xmlNode *walk = node; walk != NULL; walk = walk->next) {
        array[slot] = walk;
        slot++;
    }
    return array;
}
// check if a tag has alt text
// Parallelized this while loop like this after viewing http://web.engr.oregonstate.edu/~mjb/cs575/Handouts/tasks.1pp.pdf slide 9 but this causes slowdown
// :input node (xmlNode*) - a parsed node of the DOM tree
// :output none - increments ALT_COUNT if tag has alt text
// All current call sites invoke this inside `#pragma omp critical`, so the
// unsynchronized ALT_COUNT++ below is serialized by the caller.
// NOTE(review): the xmlGetProp() result is never xmlFree()'d (leak), and a
// NULL return would crash strcmp — presumably non-NULL because the
// attribute was just found on this node; verify.
void check_alt_text(xmlNode *node) {
xmlAttr *property = node->properties;
while (property != NULL) {
const xmlChar *name = property->name;
if (strcmp((const char*)name, "alt") == 0) {
xmlChar *value = xmlGetProp(node, name);
if (strcmp((const char*)value, "") != 0) {
ALT_COUNT++;
}
}
property = property->next;
}
}
// Decide whether a tag requires alt text (img, area, input elements).
// :input node (xmlNode*) - a parsed node of the DOM tree
// :output bool - true if the element's tag name requires an alt attribute
bool check_if_alt_needed(xmlNode *node) {
    static const char *alt_tags[] = { "img", "area", "input" };
    const char *tag = (const char*)node->name;
    for (size_t i = 0; i < sizeof(alt_tags) / sizeof(alt_tags[0]); i++) {
        if (strcmp(tag, alt_tags[i]) == 0) {
            return true;
        }
    }
    return false;
}
// Depth-first traversal of the DOM, counting elements that need alt text
// (IMAGE_COUNT) and those that carry it (ALT_COUNT, via check_alt_text).
// The strategy depends on depth: shallow levels (< 5) parallelize across
// siblings, very deep levels (> 50) spawn OpenMP tasks, everything else
// recurses serially.
// Fixes vs. previous version:
//  * recursion passes depth + 1 — the old `depth++` post-increment passed
//    the UNCHANGED depth and then skewed the shared local per sibling, so
//    the depth-based strategy selection was effectively broken;
//  * the temporary sibling array is freed (it leaked on every call).
void traverse_dom_tree_dfs(xmlNode *node, int depth) {
    if (node == NULL) {
        return;
    }
    int size = get_child_length(node);
    xmlNode **nodes = convert_to_array(node, size);
    if (depth < 5) {
        // Shallow levels: fan the siblings out across threads.
        #pragma omp parallel for num_threads(NCORES)
        for (int i = 0; i < size; i++) {
            xmlNode *cur_node = nodes[i];
            if (cur_node->type == XML_ELEMENT_NODE) {
                if (check_if_alt_needed(cur_node)) {
                    #pragma omp critical
                    {
                        IMAGE_COUNT++;
                        check_alt_text(cur_node);
                    }
                }
            }
            traverse_dom_tree_dfs(cur_node->children, depth + 1);
        }
    } else if (depth > 50) {
        // Very deep levels: hand each subtree to an OpenMP task.
        for (int i = 0; i < size; i++) {
            xmlNode *cur_node = nodes[i];
            if (cur_node->type == XML_ELEMENT_NODE) {
                if (check_if_alt_needed(cur_node)) {
                    #pragma omp critical
                    {
                        IMAGE_COUNT++;
                        check_alt_text(cur_node);
                    }
                }
            }
            #pragma omp task
            traverse_dom_tree_dfs(cur_node->children, depth + 1);
        }
    } else {
        // Middle levels: plain serial recursion.
        for (int i = 0; i < size; i++) {
            xmlNode *cur_node = nodes[i];
            if (cur_node->type == XML_ELEMENT_NODE) {
                if (check_if_alt_needed(cur_node)) {
                    #pragma omp critical
                    {
                        IMAGE_COUNT++;
                        check_alt_text(cur_node);
                    }
                }
            }
            traverse_dom_tree_dfs(cur_node->children, depth + 1);
        }
    }
    free(nodes);    // was leaked before
}
// Breadth-first traversal counting alt-requiring elements. Used for the
// subtrees handed out in parallel by traverse_dom_tree_wrap, so the global
// counters are updated inside an omp critical section.
// NOTE(review): createQueue(1000) is a fixed capacity — behaviour on
// overflow depends on queue.c; verify for large documents.
// NOTE(review): Q is never freed (no destroy function is used here).
void traverse_dom_tree_bfs(xmlNode *node) {
if (node == NULL) return;
q_t *Q = createQueue(1000);
enqueue(Q, node);
while (!isEmpty(Q)){
xmlNode *cur = dequeue(Q);
if (cur->children != NULL) {
for (xmlNode *cur_node = cur->children; cur_node != NULL; cur_node = cur_node->next) {
// int size = get_child_length(cur->children);
// xmlNode **nodes = convert_to_array(cur->children, size);
// for (int i = 0; i < size; i++) {
// xmlNode *cur_node = nodes[i];
if (cur_node->type == XML_ELEMENT_NODE) {
#pragma omp critical
{
if (check_if_alt_needed(cur_node)) {
IMAGE_COUNT++;
check_alt_text(cur_node);
}
}
if (cur_node->children != NULL){
enqueue(Q, cur_node);
}
}
}
}
}
}
// Hybrid traversal: walk the first `max_depth` levels breadth-first and
// serially, then traverse each remaining subtree in parallel with one BFS
// per subtree root.
// Fix: the `starting_nodes` array is now freed (it was leaked).
void traverse_dom_tree_wrap(xmlNode *root, int max_depth){
    int depth = 0;
    q_t *Q = createQueue(1000);
    enqueue(Q, root);
    int size;
    // Serial BFS down to max_depth; counts elements and collects the
    // frontier in Q.
    while (!isEmpty(Q) && (depth <= max_depth)){
        size = Q->size;
        for (int i = 0; i < size; i++){
            xmlNode *cur = dequeue(Q);
            if (cur->children != NULL) {
                for (xmlNode *cur_node = cur->children; cur_node != NULL; cur_node = cur_node->next) {
                    if (cur_node->type == XML_ELEMENT_NODE) {
                        if (check_if_alt_needed(cur_node)) {
                            IMAGE_COUNT++;
                            check_alt_text(cur_node);
                        }
                        if (cur_node->children != NULL){
                            enqueue(Q, cur_node);
                        }
                    }
                }
            }
        }
        depth++;
    }
    // Drain the frontier into a plain array so it can be split across
    // threads.
    size = Q->size;
    xmlNode** starting_nodes = (xmlNode**)malloc(size * sizeof(xmlNode*));
    int index = 0;
    while(!isEmpty(Q)){
        starting_nodes[index] = dequeue(Q);
        index++;
    }
    #pragma omp parallel for num_threads(NCORES)
    for (int i = 0; i < size; i++){
        traverse_dom_tree_bfs(starting_nodes[i]);
    }
    free(starting_nodes);   // was leaked before
    // NOTE(review): Q itself is still leaked; queue.c exposes no destroy
    // function at these call sites.
}
int main(int argc, char **argv) {
htmlDocPtr doc;
xmlNode *root_element = NULL;
if (argc != 3)
{
fprintf(stderr,"Expecting two arguments: [file name] [processor count]\n");
return 0;
}
NCORES = atoi(argv[2]);
if(NCORES < 1) {
fprintf(stderr, "Illegal core count: %d\n", NCORES);
return 0;
}
/* Macro to check API for match with the DLL we are using */
LIBXML_TEST_VERSION
doc = htmlReadFile(argv[1], NULL, HTML_PARSE_NOBLANKS | HTML_PARSE_NOERROR | HTML_PARSE_NOWARNING | HTML_PARSE_NONET);
if (doc == NULL)
{
fprintf(stderr, "Document not parsed successfully.\n");
return 0;
}
root_element = xmlDocGetRootElement(doc);
if (root_element == NULL)
{
fprintf(stderr, "empty document\n");
xmlFreeDoc(doc);
return 0;
}
// took timing code from wsp.c from assignment 3
struct timespec before, after;
printf("Root Node is %s\n", root_element->name);
clock_gettime(0, &before); // "0" should be CLOCK_REALTIME but vscode thinks it undefined for some reason
traverse_dom_tree_dfs(root_element, 8);
//traverse_dom_tree_wrap(root_element, 8);
clock_gettime(0, &after); // same here
double delta_ms = (double)(after.tv_sec - before.tv_sec) * 1000.0 + (after.tv_nsec - before.tv_nsec) / 1000000.0;
putchar('\n');
printf("============ Time ============\n");
printf("Time: %f ms (%f s)\n", delta_ms, delta_ms / 1000.0);
printf("Your accessibility score: %d/%d\n", ALT_COUNT, IMAGE_COUNT);
xmlFreeDoc(doc); // free document
xmlCleanupParser(); // Free globals
return 0;
}
|
GB_unop__exp2_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__exp2_fp64_fp64
// op(A') function: GB_unop_tran__exp2_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = exp2 (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = exp2 (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = exp2 (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_EXP2 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [k] = exp2 (Ax [k]) for every entry, split statically across
// nthreads. Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_unop_apply__exp2_fp64_fp64
(
    double *Cx,             // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < anz ; k++)
    {
        Cx [k] = exp2 (Ax [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = exp2 (A'): transpose, typecast (identity here), and apply exp2.
// The actual loop body is generated by including GB_unop_transpose.c,
// which expands the GB_* macros defined earlier in this file.
GrB_Info GB_unop_tran__exp2_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
edgelist.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.), Narayanan Sundaram (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_EDGELIST_H_
#define SRC_EDGELIST_H_
#include <string>
// One (src, dst, val) triplet of a sparse matrix / graph edge list.
template <typename T>
struct edge_t {
  edge_t() {}                              // members intentionally left uninitialized
  edge_t(int source, int dest, T value)
  {
    src = source;
    dst = dest;
    val = value;
  }
  int src;   // source vertex id
  int dst;   // destination vertex id
  T val;     // edge weight
};
// A list of nnz edges describing an m x n sparse matrix / graph.
// `edges` is a 64-byte-aligned buffer owned by this struct; call clear()
// to release it (no RAII destructor, matching the original API).
// Fixes: `edges` is now always initialized (the (m, n, nnz) constructor
// previously left it indeterminate when nnz <= 0), and the member-init
// lists follow declaration order (silences -Wreorder).
template <typename T>
struct edgelist_t {
  edge_t<T>* edges;
  int m;
  int n;
  int nnz;
  edgelist_t() : edges(nullptr), m(0), n(0), nnz(0) {}
  edgelist_t(int _m, int _n, int _nnz) : edges(nullptr), m(_m), n(_n), nnz(_nnz)
  {
    if(nnz > 0) {
      edges = reinterpret_cast<edge_t<T>*>(_mm_malloc((size_t)nnz * sizeof(edge_t<T>), 64));
    }
  }
  // Adopts an externally allocated buffer (must be _mm_malloc'd for clear()).
  edgelist_t(edge_t<T>* edges, int m, int n, int nnz) : edges(edges), m(m), n(n), nnz(nnz) {}
  void clear() {
    if (nnz > 0) {
      _mm_free(edges);
    }
    edges = nullptr;
    nnz = 0;
    m = 0;
    n = 0;
  }
};
// Edge triplet augmented with the id of the tile (partition block) it is
// binned into.
template <typename T>
struct tedge_t {
int src;       // source vertex id
int dst;       // destination vertex id
int tile_id;   // owning tile
T val;         // edge weight
};
// Read one (src, dst, val) edge record from `ifile`.
//  binaryformat : raw int/int/T record vs. whitespace-separated text
//  edgeweights  : when false no weight is read and *val is set to 1
// Returns true on success, false on EOF or a failed/short read.
template<typename T>
bool readLine (FILE * ifile, int * src, int * dst, T * val, bool binaryformat=true, bool edgeweights=true)
{
if(binaryformat) {
auto fread_bytes = fread(src, sizeof(int), 1, ifile);
if (feof(ifile)) return false;
assert(fread_bytes == 1);
fread_bytes = fread(dst, sizeof(int), 1, ifile);
if (feof(ifile)) return false;
assert(fread_bytes == 1);
if (edgeweights) {
fread_bytes = fread(val, sizeof(T), 1, ifile);
if (feof(ifile)) return false;
assert(fread_bytes == 1);
} else {
*val = (T)(1);
}
} else {
if (edgeweights) {
int ret;
// The fscanf format is chosen by a runtime std::is_same dispatch;
// the branches for other T are never executed for a given
// instantiation, even though they pass mismatched pointer types.
if (std::is_same<T, float>::value) {
ret = fscanf(ifile, "%d %d %f", src, dst, val);
if (ret != 3) return false;
} else if (std::is_same<T, double>::value) {
ret = fscanf(ifile, "%d %d %lf", src, dst, val);
if (ret != 3) return false;
} else if (std::is_same<T, int>::value) {
ret = fscanf(ifile, "%d %d %d", src, dst, val);
if (ret != 3) return false;
} else if (std::is_same<T, unsigned int>::value) {
ret = fscanf(ifile, "%d %d %u", src, dst, val);
if (ret != 3) return false;
}else {
// NOTE(review): for an unsupported T nothing is read, yet the
// function still returns true below.
std::cout << "Data type not supported (read)" << std::endl;
}
} else {
int ret = fscanf(ifile, "%d %d", src, dst);
if (ret == 2) {
*val = (T)(1);
} else return false;
}
if (feof(ifile)) return false;
}
return true;
}
// First pass over an edge file: obtain the matrix dimensions and the edge
// count. With header=true the three header fields are read directly;
// otherwise the whole file is scanned, taking the maximum src/dst ids and
// counting records. Leaves the file position after the header / at EOF.
// NOTE(review): header values pass through a plain int tmp_[2], so an nnz
// above INT_MAX is truncated even though *nnz is unsigned long.
template<typename T>
void get_maxid_and_nnz(FILE* fp, int* m, int* n, unsigned long int* nnz, bool binaryformat=true, bool header=true, bool edgeweights=true) {
if (header) {
int tmp_[3];
if (binaryformat) {
auto fread_bytes = fread(tmp_, sizeof(int), 3, fp);
assert(fread_bytes == 3);
*m = tmp_[0];
*n = tmp_[1];
*nnz = tmp_[2];
} else {
int ret = fscanf(fp, "%d %d %u", &(tmp_[0]), &(tmp_[1]), &(tmp_[2]));
assert(ret == 3);
*m = tmp_[0];
*n = tmp_[1];
*nnz = tmp_[2];
}
return;
} else { //no header
unsigned long nnz_ = 0;
int tempsrc, tempdst;
int maxm = 0;
int maxn = 0;
T tempval;
while(true) {
if(feof(fp)) {
break;
}
if (!readLine<T>(fp, &tempsrc, &tempdst, &tempval, binaryformat, edgeweights)) {
break;
}
maxm = (maxm > tempsrc)?(maxm):(tempsrc);
maxn = (maxn > tempdst)?(maxn):(tempdst);
nnz_++;
}
*m = maxm;
*n = maxn;
*nnz = nnz_;
}
}
// Write one (src, dst, val) edge record to `ofile`; the mirror image of
// readLine() (raw binary record or whitespace-separated text line, with
// the weight omitted when edgeweights is false).
template<typename T>
void writeLine (FILE* ofile, int src, int dst, T val, bool binaryformat=true, bool edgeweights=true)
{
if (binaryformat) {
auto fwrite_bytes = fwrite(&src, sizeof(int), 1, ofile);
assert(fwrite_bytes == 1);
fwrite_bytes = fwrite(&dst, sizeof(int), 1, ofile);
assert(fwrite_bytes == 1);
if (edgeweights) {
fwrite_bytes = fwrite(&val, sizeof(T), 1, ofile);
assert(fwrite_bytes == 1);
}
} else {
if (edgeweights) {
// Runtime std::is_same dispatch picks the matching printf format;
// untaken branches are never executed for a given instantiation.
if (std::is_same<T, float>::value) {
fprintf(ofile, "%d %d %.8f\n", src, dst, val);
} else if (std::is_same<T, double>::value) {
fprintf(ofile, "%d %d %.15lf\n", src, dst, val);
} else if (std::is_same<T, int>::value) {
fprintf(ofile, "%d %d %d\n", src, dst, val);
} else if (std::is_same<T, unsigned int>::value) {
fprintf(ofile, "%d %d %u\n", src, dst, val);
} else {
std::cout << "Data type not supported (write)\n";
}
} else {
fprintf(ofile, "%d %d\n", src, dst);
}
}
}
// Write this rank's edge list to the file "<dir><rank>", either as a raw
// binary stream or as text, optionally preceded by an "m n nnz" header.
// Fix: a failed fopen() is now detected (previously the fwrite/fprintf
// calls below dereferenced a NULL stream).
template <typename T>
void write_edgelist(const char* dir, const edgelist_t<T> & edgelist,
bool binaryformat=true, bool header=true, bool edgeweights=true)
{
  int global_nrank = get_global_nrank();
  int global_myrank = get_global_myrank();
  std::stringstream fname_ss;
  fname_ss << dir << global_myrank;
  printf("Writing file: %s\n", fname_ss.str().c_str());
  FILE * fp = fopen(fname_ss.str().c_str(), binaryformat ? "wb" : "w");
  if (fp == NULL) {
    printf("Could not open file: %s\n", fname_ss.str().c_str());
    return;
  }
  if (header) {
    if (binaryformat) {
      auto fwrite_bytes = fwrite(&(edgelist.m), sizeof(int), 1, fp);
      assert(fwrite_bytes == 1);
      fwrite_bytes = fwrite(&(edgelist.n), sizeof(int), 1, fp);
      assert(fwrite_bytes == 1);
      fwrite_bytes = fwrite(&(edgelist.nnz), sizeof(int), 1, fp);
      assert(fwrite_bytes == 1);
    } else {
      fprintf(fp, "%d %d %u\n", edgelist.m, edgelist.n, edgelist.nnz);
    }
  }
  for(auto i = 0 ; i < edgelist.nnz ; i++)
  {
    writeLine<T>(fp, edgelist.edges[i].src, edgelist.edges[i].dst, edgelist.edges[i].val, binaryformat, edgeweights);
  }
  fclose(fp);
}
// Load an edge list distributed over files "<dir>0", "<dir>1", ... .
// Rank r reads files r, r+nrank, r+2*nrank, ... until one is missing.
// Two passes: first to size things (dimensions + total nnz), then to fill
// the freshly allocated `edges` buffer. Global m/n are agreed on via
// MPI_Allreduce.
template <typename T>
void load_edgelist(const char* dir, edgelist_t<T>* edgelist,
bool binaryformat=true, bool header=true, bool edgeweights=true) {
int global_nrank = get_global_nrank();
int global_myrank = get_global_myrank();
edgelist->m = 0;
edgelist->n = 0;
edgelist->nnz = 0;
// Pass 1: accumulate dimensions and edge count over this rank's files.
for(int i = global_myrank ; ; i += global_nrank)
{
std::stringstream fname_ss;
fname_ss << dir << i;
FILE* fp;
if (binaryformat) {
fp = fopen(fname_ss.str().c_str(), "rb");
} else {
fp = fopen(fname_ss.str().c_str(), "r");
}
if(!fp) {
printf("Could not open file: %s\n", fname_ss.str().c_str());
break;
} else {
printf("Reading file: %s\n", fname_ss.str().c_str());
}
int m_, n_;
unsigned long nnz_;
get_maxid_and_nnz<T>(fp, &m_, &n_, &nnz_, binaryformat, header, edgeweights);
edgelist->m = std::max(m_, edgelist->m);
edgelist->n = std::max(n_, edgelist->n);
edgelist->nnz += nnz_;
fclose(fp);
}
// Agree on the global dimensions across ranks.
int local_max_m = edgelist->m;
int max_m = edgelist->m;
int local_max_n = edgelist->n;
int max_n = edgelist->n;
MPI_Allreduce(&local_max_m, &max_m, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
MPI_Allreduce(&local_max_n, &max_n, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
edgelist->m = max_m;
edgelist->n = max_n;
std::cout << "Got: " << edgelist->m << " by " << edgelist->n << " vertices" << std::endl;
std::cout << "Got: " << edgelist->nnz << " edges" << std::endl;
edgelist->edges = reinterpret_cast<edge_t<T>*>(
_mm_malloc((uint64_t)edgelist->nnz * (uint64_t)sizeof(edge_t<T>), 64));
unsigned long int nnzcnt = 0;
// Pass 2: reopen the same files, skip headers, and read the edges.
for(int i = global_myrank ; ; i += global_nrank)
{
std::stringstream fname_ss;
fname_ss << dir << i;
//printf("Opening file: %s\n", fname_ss.str().c_str());
FILE* fp;
if (binaryformat) {
fp = fopen(fname_ss.str().c_str(), "rb");
} else {
fp = fopen(fname_ss.str().c_str(), "r");
}
if(!fp) break;
if (header) { //remove header
int m_, n_;
unsigned long nnz_;
get_maxid_and_nnz<T>(fp, &m_, &n_, &nnz_, binaryformat, header, edgeweights);
}
int j = 0;
while(true) {
if (feof(fp)) {
break;
}
if (!readLine<T>(fp, &(edgelist->edges[nnzcnt].src), &(edgelist->edges[nnzcnt].dst), &(edgelist->edges[nnzcnt].val), binaryformat, edgeweights)) {
break;
}
#ifdef __DEBUG
//std::cout <<(edgelist->edges[nnzcnt].src) << " " << (edgelist->edges[nnzcnt].dst) << std::endl;
if(edgelist->edges[nnzcnt].src <= 0 ||
edgelist->edges[nnzcnt].dst <= 0 ||
edgelist->edges[nnzcnt].src > edgelist->m ||
edgelist->edges[nnzcnt].dst > edgelist->n)
{
std::cout << "Invalid edge, i, j, nnz: " << i << " , " << j << " , " << nnzcnt << std::endl;
exit(0);
}
j++;
#endif
nnzcnt++;
}
fclose(fp);
}
}
// Relabel vertex ids with a pseudo-random permutation that is identical on
// every rank: rank 0 builds it with a fixed seed and broadcasts it.
// Edge ids are 1-based; the permutation table is 0-based.
template <typename T>
void randomize_edgelist_square(edgelist_t<T>* edgelist) {
unsigned int* mapping = new unsigned int[edgelist->m];
unsigned int* rval = new unsigned int[edgelist->m];
int global_myrank = get_global_myrank();
if (global_myrank == 0) {
srand(5);   // fixed seed: deterministic permutation
// #pragma omp parallel for
for (int i = 0; i < edgelist->m; i++) {
mapping[i] = i;
rval[i] = rand() % edgelist->m;
}
// Swap-based shuffle (not exactly Fisher-Yates, so not perfectly uniform).
for (int i = 0; i < edgelist->m; i++) {
unsigned int tmp = mapping[i];
mapping[i] = mapping[rval[i]];
mapping[rval[i]] = tmp;
}
}
delete[] rval;
// NOTE(review): broadcast as MPI_INT although mapping is unsigned int —
// same width, so the bytes transfer correctly.
MPI_Bcast(mapping, edgelist->m, MPI_INT, 0, MPI_COMM_WORLD);
#pragma omp parallel for
for (int i = 0; i < edgelist->nnz; i++) {
edgelist->edges[i].src = mapping[edgelist->edges[i].src - 1] + 1;
edgelist->edges[i].dst = mapping[edgelist->edges[i].dst - 1] + 1;
}
delete[] mapping;
}
// Compact away columns that contain no edges, renumbering dst ids to a
// dense 1..new_ncols range. *remaining_indices receives the original
// (1-based) id of each surviving column; it is _mm_malloc'd here and the
// caller owns it (_mm_free).
template<typename T>
void remove_empty_columns(edgelist_t<T> * edges, int ** remaining_indices)
{
// Remove empty columns
bool * colexists = new bool[edges->n];
memset(colexists, 0, edges->n * sizeof(bool));
// new_colids[i+1] will hold the number of non-empty columns among 0..i,
// i.e. a prefix sum used as the renumbering table.
int * new_colids = new int[edges->n+1];
memset(new_colids, 0, (edges->n + 1) * sizeof(int));
int new_ncols = 0;
for(int i = 0 ; i < edges->nnz ; i++)
{
if(!colexists[edges->edges[i].dst-1])
{
new_ncols++;
}
colexists[edges->edges[i].dst-1] = true;
}
std::cout << "New ncols: " << new_ncols << std::endl;
*(remaining_indices) = (int*) _mm_malloc(new_ncols * sizeof(int), 64);
int new_colcnt = 0;
for(int i = 0 ; i < edges->n; i++)
{
new_colids[i+1] = (colexists[i] ? 1 : 0) + new_colids[i];
if(colexists[i])
{
assert(new_colcnt < new_ncols);
(*(remaining_indices))[new_colcnt] = i+1;
new_colcnt++;
}
}
assert(new_colcnt == new_ncols);
// Remap every edge's dst to its new dense 1-based id.
#pragma omp parallel for
for(int i = 0 ; i < edges->nnz ; i++)
{
edges->edges[i].dst = new_colids[edges->edges[i].dst-1] + 1;
assert(edges->edges[i].dst - 1 >= 0);
assert(edges->edges[i].dst - 1 < new_ncols);
}
edges->n = new_ncols;
delete [] colexists;
delete [] new_colids;
}
// Keep only the edges whose 0-based source row lies in [start_row,
// end_row), compacting them in place and shifting src so that start_row
// becomes row 0. Updates nnz and m accordingly.
template<typename T>
void filter_edges_by_row(edgelist_t<T> * edges, int start_row, int end_row)
{
  int kept = 0;
  for(int i = 0 ; i < edges->nnz ; i++)
  {
    const int row0 = edges->edges[i].src - 1;   // 0-based row index
    if(row0 >= start_row && row0 < end_row)
    {
      edges->edges[kept] = edges->edges[i];
      edges->edges[kept].src -= start_row;
      kept++;
    }
  }
  edges->nnz = kept;
  edges->m = (end_row-start_row);
  std::cout << "New edges->m: " << edges->m << std::endl;
}
// Compute the global (across MPI ranks) maximum src and dst id of an edge
// array; used as the matrix dimensions.
template<typename T>
void get_dimensions(edge_t<T> * edges, int nnz, int &max_m, int &max_n)
{
int local_max_m = 0;
int local_max_n = 0;
#pragma omp parallel for reduction(max:local_max_m, local_max_n)
for(int i = 0 ; i < nnz ; i++)
{
local_max_m = std::max(local_max_m, edges[i].src);
local_max_n = std::max(local_max_n, edges[i].dst);
}
MPI_Allreduce(&local_max_m, &max_m, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
MPI_Allreduce(&local_max_n, &max_n, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
}
// Public entry point: load an edge list, optionally applying the random
// vertex relabeling afterwards.
template <typename T>
void ReadEdges(edgelist_t<T>* edgelist, const char* fname_in, bool binaryformat=true, bool header=true, bool edgeweights=true, bool randomize=false) {
load_edgelist(fname_in, edgelist, binaryformat, header, edgeweights);
if (randomize) {
randomize_edgelist_square<T>(edgelist);
}
}
// Public entry point: thin wrapper over write_edgelist().
template <typename T>
void WriteEdges(const edgelist_t<T>& edgelist, const char* fname_in, bool binaryformat=true, bool header=true, bool edgeweights=true) {
write_edgelist(fname_in, edgelist, binaryformat, header, edgeweights);
}
#endif // SRC_EDGELIST_H_
|
paralelo.c | // Proyecto 1 OpenMP
// Ecuacion unidimensional de disipacion de calor
// Juan Diego Solorzano 18151
// Mario Perdomo 18029
// Jonathan Alvarez 15842
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <math.h>
// One-dimensional heat-dissipation equation solved with explicit finite
// differences, parallelized with OpenMP. Reads parameters from stdin.
// Fixes vs. previous version:
//  * the stencil loop ran j = 0..ni-1 and read t[-1] and t[ni] (out of
//    bounds) and overwrote the boundary cells; it now updates interior
//    points only, keeping the Dirichlet boundary temperatures fixed;
//  * dx is recomputed after L and ni are read (it was derived from the
//    default values before the user input);
//  * tError is reset inside the omp single (it was written by all
//    threads concurrently).
int main(int argc, char* argv[])
{
    double start;
    double end;
    // Input
    double error = 0; // precision o diferencia requerida
    int ni = 10; // numero de intervalos discretos
    double L = 100;
    double t0 = 40; // temperatura inicial de toda la barra
    double tL = 40; // temperatura en la frontera izquierda
    double tR = 100; // temperatura en la frontera derecha
    double dt = 10000;
    double dx = L/ni;
    double c = 0.000001; // constante 10e-5
    int threads = 2;
    printf("Ingrese la cantidad de threads: ");
    if (!(scanf("%d", &threads))){
        printf("\nEse valor no es valido\n");
        exit(0);
    };
    printf("Ingrese el numero de intervalos: ");
    if (!(scanf("%d", &ni))){
        printf("\nEse valor no es valido\n");
        exit(0);
    };
    printf("Ingrese la longitud: ");
    if (!(scanf("%lf", &L))){
        printf("\nEse valor no es valido\n");
        exit(0);
    };
    printf("Ingrese la temperatura inicial de la barra: ");
    if (!(scanf("%lf", &t0))){
        printf("\nEse valor no es valido\n");
        exit(0);
    };
    printf("Ingrese la temperatura de la frontera izquierda: ");
    if (!(scanf("%lf", &tL))){
        printf("\nEse valor no es valido\n");
        exit(0);
    };
    printf("Ingrese la temperatura de la frontera derecha: ");
    if (!(scanf("%lf", &tR))){
        printf("\nEse valor no es valido\n");
        exit(0);
    };
    // Recompute grid spacing with the values actually entered.
    dx = L/ni;
    printf("Calculando con %d intervalos discretos\n", ni);
    start = omp_get_wtime();
    // Vectores
    double t[ni];
    double next_t[ni];
    for (int i = 0; i < ni; i++)
    {
        t[i] = t0;
        next_t[i] = 0.0;
    }
    t[0] = tL;
    t[ni-1] = tR;
    error = 1000;
    int interv = 0;
    double tError;
    int j;
    // while no llegamos a criterio de salida
    #pragma omp parallel num_threads(threads) shared(t, interv, tError, dt, dx)
    {
        while (interv < 100000 && error >= 0.0001 )
        {
            // Calcular nueva temperatura — interior points only; the
            // boundaries are Dirichlet conditions and stay fixed.
            #pragma omp for
            for (j = 1; j < ni-1; j++)
            {
                next_t[j] = t[j] + ((c*dt)/pow(dx, 2)) * (t[j-1] - 2*t[j] + t[j+1]);
            }
            #pragma omp single
            {
                // Carry the fixed boundary values over unchanged.
                next_t[0] = t[0];
                next_t[ni-1] = t[ni-1];
                tError = 0;   // reset here: only the single thread touches it
                for (j = 0; j < ni; j++)
                {
                    // Calcular error (largest per-cell change)
                    double newError = fabs(next_t[j] - t[j]);
                    if (newError > tError){
                        tError = newError;
                    }
                }
                error = tError;
                // Actualizar vectores
                for (j = 0; j < ni; j++) {
                    t[j] = next_t[j];
                    next_t[j] = 0.0;
                }
                interv++;
            } // implicit barrier: every thread sees the new error/interv
        }
    }
    printf("\nIteracion %d\n\n Vector solucion \n", interv);
    for (int j = 0; j < ni; j++) {
        printf("%f ", t[j]);
    }
    printf("\n\nError = %lf\n", error);
    end = omp_get_wtime();
    printf("Tiempo = %f s\n", end - start);
}
ofmo-rys-xxxx.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#else
#include "omp-dummy.h"
#endif
#include "ofmo-index.h"
#include "ofmo-twoint.h"
#ifndef false
#define false 0
#endif
#ifndef true
#define true 1
#endif
#define HALF 0.5e0
#define ONE 1.e0
#define ZERO 0.e0
#define EPS_PS4 1.e-30
#define EPS_ERI 1.e-15
#define OFMO_EBUF_FULL 1
#define OFMO_EBUF_NOFULL 0
#define MAXNPSPAIR 100
#define EPS_PS_PAIR 1.e-32
#define EPS_CS_PAIR2 1.e-30
extern void calc_root( const int nroot, const double T,
double *U, double *W );
extern int ofmo_integ_add_fock( const int nao, const size_t nstored_eri,
const double eri_val[], const short int eri_ind4[],
const double D[], double G[] );
/* Allocate a dense na x nb x nc x nd integer array as four chained pointer
 * tables over a single contiguous data buffer (4 mallocs in total).
 * ip[i][j][k][l] maps to element ((i*nb + j)*nc + k)*nd + l of the flat
 * buffer ip[0][0][0]. Release with ofmo_free_i4d(). */
static int**** ofmo_alloc_i4d( int na, int nb, int nc, int nd ) {
    int ****ip  = (int****)malloc( sizeof(int***) * na );
    ip[0]       = (int*** )malloc( sizeof(int**)  * na * nb );
    ip[0][0]    = (int**  )malloc( sizeof(int*)   * na * nb * nc );
    ip[0][0][0] = (int*   )malloc( sizeof(int)    * na * nb * nc * nd );
    for ( int i = 0; i < na; i++ ) {
        ip[i] = ip[0] + i * nb;
        for ( int j = 0; j < nb; j++ ) {
            ip[i][j] = ip[0][0] + ( i * nb + j ) * nc;
            for ( int k = 0; k < nc; k++ ) {
                ip[i][j][k] = ip[0][0][0] + ( ( i * nb + j ) * nc + k ) * nd;
            }
        }
    }
    return ip;
}
/* Free a 4-D array created by ofmo_alloc_i4d(); NULL-safe at every level. */
static void ofmo_free_i4d( int**** ip ) {
    if ( ip == NULL ) return;
    if ( ip[0] != NULL ) {
        if ( ip[0][0] != NULL ) {
            if ( ip[0][0][0] != NULL ) free( ip[0][0][0] );
            free( ip[0][0] );
        }
        free( ip[0] );
    }
    free( ip );
}
static int *NNAO;
static int **ANGM;
static int *LAOT;
static int *INDX;
static int **NAM;
static int **NAP;
static double *DFACT;
/* Rys積分で用いる変数 */
static double **V_XINT;
static double **V_YINT;
static double **V_ZINT;
/* HRR計算で使用する変数 */
static int *****V_HADD = NULL;
static double **V_eh = NULL;
/* indx */
static int *NROOTS;
static int **INS;
/* 縮約分子積分の格納場所 */
static double **DINTEG_MASTER = NULL;
/* カットオフテーブル関連 */
static double _CK_;
// Set up the address information needed for the HRR step: for each
// (ma, mb, mc, md) angular-momentum quartet, HADD records the offset into
// the per-thread eh[] scratch buffer where that block of contracted
// integrals lives. Returns the total number of doubles eh[] must hold.
static int ofmo_hrr_make_add(
const int mythread,
const int La, const int Lb, const int Lc, const int Ld ) {
int hrr_mem, ma, mb, mc, md;
int na, nb, nab, nabd;
int Lab, Lcd;
int ****HADD;
HADD = V_HADD[mythread];
Lab = La + Lb;
Lcd = Lc + Ld;
hrr_mem = 0;
// addresses of the contracted integrals produced by the VRR step
mb = md = 0;
for ( ma=La; ma<=Lab; ma++ ) {
na = NNAO[ma];
for ( mc=Lc; mc<=Lcd; mc++ ) {
HADD[ma][mb][mc][md] = hrr_mem;
hrr_mem += ( na*NNAO[mc] );
}
}
// addresses for the HRR applied to the AB pair
md = 0;
for ( mb=1; mb<=Lb; mb++ ) {
nb = NNAO[mb];
for ( ma=La; ma<=(Lab-mb); ma++ ) {
nab = nb * NNAO[ma];
for ( mc=Lc; mc<=Lcd; mc++ ) {
HADD[ma][mb][mc][md] = hrr_mem;
hrr_mem += ( nab * NNAO[mc]);
}
}
}
// addresses for the HRR applied to the CD pair
ma = La;
mb = Lb;
nab = NNAO[La]*NNAO[Lb];
for ( md=1; md<=Ld; md++ ) {
nabd = nab*NNAO[md];
for ( mc=Lc; mc<=(Lcd-md); mc++ ) {
HADD[ma][mb][mc][md] = hrr_mem;
hrr_mem += nabd * NNAO[mc];
}
}
return hrr_mem;
}
// Release the per-thread Rys work areas allocated by ofmo_rys_init()
// (registered there with atexit).
// NOTE(review): assumes omp_get_max_threads() returns the same value it
// did at allocation time — TODO confirm.
static void ofmo_rys_finalize() {
int nthreads, i;
nthreads = omp_get_max_threads();
for ( i=0; i<nthreads; i++ ) {
free( V_XINT[i] );
free( V_YINT[i] );
free( V_ZINT[i] );
free( INS[i] );
free( DINTEG_MASTER[i] );
}
free( V_XINT );
free( V_YINT );
free( V_ZINT );
free( INS );
free( NROOTS );
free( DINTEG_MASTER );
}
// Allocate the per-thread scratch buffers used by the Rys-quadrature
// integral code (XINT/YINT/ZINT tables, an index scratch, and the
// contracted-integral buffer), sized from the maximum angular momentum
// `maxlqn`. Also fetches the shared index tables from ofmo-index.
// Registers ofmo_rys_finalize() to release everything at exit.
static int ofmo_rys_init( const int maxlqn ) {
int nthreads, nroot, maxlqn2;
maxlqn2 = 2*maxlqn;
ofmo_index_init( maxlqn2 );
NNAO = ofmo_getadd_nnao();
LAOT = ofmo_getadd_laot();
ANGM = ofmo_getadd_angm();
INDX = ofmo_getadd_indx();
NAM = ofmo_getadd_nam();
// number of quadrature roots: (4*maxlqn)/2 + 1
nroot = ((4*maxlqn)>>1) + 1;
nthreads = omp_get_max_threads();
V_XINT = (double**)malloc( sizeof(double*) * nthreads );
V_YINT = (double**)malloc( sizeof(double*) * nthreads );
V_ZINT = (double**)malloc( sizeof(double*) * nthreads );
NROOTS = (int*)malloc( sizeof(int) * nthreads );
INS = (int**)malloc( sizeof(int*) * nthreads );
DINTEG_MASTER = (double**)malloc( sizeof(double*) * nthreads );
// Each thread allocates its own buffers (first-touch placement).
#pragma omp parallel
{
int mythread, nint, n, n4;
n = NNAO[maxlqn];
n4 = n*n*n*n;   // one full shell quartet of contracted integrals
mythread = omp_get_thread_num();
nint = (maxlqn2+1)*(maxlqn2+1)*nroot;
V_XINT[mythread] = (double*)malloc( sizeof(double) * nint );
V_YINT[mythread] = (double*)malloc( sizeof(double) * nint );
V_ZINT[mythread] = (double*)malloc( sizeof(double) * nint );
INS[mythread] = (int*)malloc( sizeof(int) * 3 );
DINTEG_MASTER[mythread] = (double*)malloc( sizeof(double) * n4 );
}
atexit( ofmo_rys_finalize );
return 0;
}
// Release the per-thread HRR work areas allocated by ofmo_hrr_init()
// (registered there with atexit). NULL-safe per thread slot.
static void ofmo_hrr_finalize() {
int nthreads, i;
nthreads = omp_get_max_threads();
for ( i=0; i<nthreads; i++ ) {
if ( V_eh[i] ) free( V_eh[i] );
if ( V_HADD[i] ) ofmo_free_i4d( V_HADD[i] );
}
free( V_eh );
free( V_HADD );
V_eh = NULL;
V_HADD = NULL;
}
// HRR initialization (needs to be called only once): allocate each
// thread's HADD offset table and eh[] scratch buffer, sized for the worst
// case where every shell carries angular momentum `maxlqn`.
static int ofmo_hrr_init( const int maxlqn ) {
int nthreads;
nthreads = omp_get_max_threads();
V_HADD = (int*****)malloc( sizeof(int****) * nthreads );
V_eh = (double**)malloc( sizeof(double*) * nthreads );
NAP = ofmo_getadd_nap();
DFACT = ofmo_getadd_dfact();
atexit( ofmo_hrr_finalize );
#pragma omp parallel
{
int mythread, hrr_mem;
mythread = omp_get_thread_num();
V_HADD[mythread] =
ofmo_alloc_i4d( 2*maxlqn+1, maxlqn+1, 2*maxlqn+1, maxlqn+1);
hrr_mem = ofmo_hrr_make_add( mythread,
maxlqn, maxlqn, maxlqn, maxlqn );
V_eh[mythread] = (double*)malloc( sizeof(double) * hrr_mem );
}
return 0;
}
// Top-level initializer (needs to be called only once); subsequent calls
// are no-ops. Sets up the Rys and HRR work areas and the cutoff constant.
int ofmo_Rys_integ_init( const int maxlqn ) {
static int called = false;
double pi, t;
if ( called ) return 0;
ofmo_rys_init( maxlqn );
ofmo_hrr_init( maxlqn );
// _CK_ = sqrt( 2 * pi^(5/2) ), used by the cutoff table code
pi = 4.e0 * atan( 1.e0 );
t = 2.e0 * pi * pi * sqrt( pi );
_CK_ = sqrt( t );
called = true;
return 0;
}
// Zero the VRR output region of eh[] (the (ma,0|mc,0) blocks) for the
// given shell quartet, ahead of accumulation.
static int ofmo_hrr_clear(
const int La, const int Lb, const int Lc, const int Ld,
int ****HADD, double *eh ) {
int mab, mcd, Lab, Lcd;
int nab, nabcd, i;
double *th;
Lab = La + Lb;
Lcd = Lc + Ld;
for ( mab=La; mab<=Lab; mab++ ) {
nab = NNAO[mab];
for ( mcd=Lc; mcd<=Lcd; mcd++ ) {
nabcd = nab * NNAO[mcd];
th = &eh[ HADD[mab][0][mcd][0] ];
for ( i=0; i<nabcd; i++ ) th[i] = 0.e0;
}
}
return 0;
}
// Scale the final (La Lb|Lc Ld) block of eh[] by the per-AO normalization
// factors DFACT and copy the result into the output buffer DINT[].
static int ofmo_hrr_coef(
const int La, const int Lb, const int Lc, const int Ld,
double DINT[],
int ****HADD, double *eh ) {
int i, j, k, l, iao, jao, kao, lao, ix;
double *th, coef_a, coef_ab, coef_abc;
th = &eh[ HADD[La][Lb][Lc][Ld] ];
ix = 0;
for ( i=0, iao=LAOT[La]; i<NNAO[La]; i++, iao++ ) {
coef_a = DFACT[iao];
for ( j=0, jao=LAOT[Lb]; j<NNAO[Lb]; j++, jao++ ) {
coef_ab = coef_a * DFACT[jao];
for ( k=0, kao=LAOT[Lc]; k<NNAO[Lc]; k++, kao++ ) {
coef_abc = coef_ab * DFACT[kao];
for ( l=0, lao=LAOT[Ld]; l<NNAO[Ld]; l++, lao++ ) {
DINT[ix] = coef_abc * DFACT[lao] * th[ix];
ix++;
}
}
}
}
return 0;
}
/* Accessors for the per-thread work arrays allocated above. */
/* Address of this thread's 2D x-direction integral buffer. */
double* ofmo_integ_getadd_xint( const int mythread ) {
    double *addr = V_XINT[mythread];
    return addr;
}
/* Address of this thread's 2D y-direction integral buffer. */
double* ofmo_integ_getadd_yint( const int mythread ) {
    double *addr = V_YINT[mythread];
    return addr;
}
/* Address of this thread's 2D z-direction integral buffer. */
double* ofmo_integ_getadd_zint( const int mythread ) {
    double *addr = V_ZINT[mythread];
    return addr;
}
/* Address of this thread's HRR intermediate (eh) buffer. */
double* ofmo_integ_getadd_eh( const int mythread ) {
    double *addr = V_eh[mythread];
    return addr;
}
/* Address of this thread's contracted-ERI output buffer. */
double* ofmo_integ_getadd_eri( const int mythread ) {
    double *addr = DINTEG_MASTER[mythread];
    return addr;
}
/** Perform the horizontal recurrence relations (HRR).
 * External variables referenced:
 *   LAOT[CS type]        = index of the first AO contained in the shell type
 *   NNAO[CS type]        = number of AOs contained in the shell type
 *   INDX[AO type]        = Cartesian direction index used for the recurrence
 *   NAM/NAP[AO][dir]     = AO index with one quantum lowered / raised in dir
 *   HADD[ma][mb][mc][md] = head address of each angular-momentum quadruple
 *   eh[]                 = work array holding contracted integrals during HRR
 * Always returns 0.
 * */
static int ofmo_hrr_calc(
    const int La, const int Lb, const int Lc, const int Ld,
    const double BA[3], const double DC[3],
    int ****HADD, double *eh ) {
    int ma, mb, mc, md;
    int Lab, Lcd;
    int ix;
    int iao, iao0, iao1, iaop, iaop0;
    int jao, jao0, jao1, jaom, jaom0;
    int kao, kao0, kao1, kaop, kaop0, k;
    int lao, lao0, lao1, laom, laom0;
    int add01, add10, add00;
    int I01, I10, I00, IJ01, IJ10, IJ00, IJK01, IJK00;
    int IJKL01, IJKL10, IJKL00;
    double *d01, *d10, *d00;
    Lab = La + Lb;
    Lcd = Lc + Ld;
    // HRR on the bra (AB) side:
    //   (a b|c 0) = (a+1 b-1|c 0) - BA * (a b-1|c 0)
    // built up for increasing mb, decreasing the available ma range.
    for ( mb=1; mb<=Lb; mb++ ) {
        jao0  = LAOT[mb];
        jao1  = jao0 + NNAO[mb];
        jaom0 = LAOT[mb-1];
        for ( ma=La; ma<=(Lab-mb); ma++ ) {
            iao0  = LAOT[ma];
            iao1  = iao0 + NNAO[ma];
            iaop0 = LAOT[ma+1];
            for ( mc=Lc; mc<=Lcd; mc++ ) {
                kao0 = LAOT[mc];
                kao1 = kao0 + NNAO[mc];
                // head addresses of the target and the two source blocks
                add01 = HADD[ma  ][mb  ][mc][0];
                add10 = HADD[ma+1][mb-1][mc][0];
                add00 = HADD[ma  ][mb-1][mc][0];
                for ( iao=iao0; iao<iao1; iao++ ) {
                    I01 = add01 + (iao-iao0)*NNAO[mb  ]*NNAO[mc];
                    I00 = add00 + (iao-iao0)*NNAO[mb-1]*NNAO[mc];
                    for ( jao=jao0; jao<jao1; jao++ ) {
                        // recurrence direction and the shifted AO indices
                        ix   = INDX[jao];
                        jaom = NAM[jao][ix];
                        iaop = NAP[iao][ix];
                        IJ01 = I01 + (jao-jao0)*NNAO[mc];
                        IJ10 = add10 + (iaop-iaop0)*NNAO[mb-1]*NNAO[mc]
                             + (jaom-jaom0)*NNAO[mc];
                        IJ00 = I00 + (jaom-jaom0)*NNAO[mc];
                        d01 = &eh[IJ01];
                        d10 = &eh[IJ10];
                        d00 = &eh[IJ00];
                        // vectorizable inner update over the ket AOs
                        for ( kao=kao0, k=0; kao<kao1; kao++, k++ )
                            d01[k] = d10[k] - BA[ix]*d00[k];
                    }
                }
            } // for (mc)
        } // for (ma)
    } // for (mb);
    // HRR on the ket (CD) side:
    //   (ab|c d) = (ab|c+1 d-1) - DC * (ab|c d-1)
    // bra is now fixed at (La,Lb).
    ma = La;
    mb = Lb;
    iao0 = LAOT[ma];
    iao1 = iao0 + NNAO[ma];
    jao0 = LAOT[mb];
    jao1 = jao0 + NNAO[mb];
    for ( md=1; md<=Ld; md++ ) {
        lao0  = LAOT[md];
        lao1  = lao0 + NNAO[md];
        laom0 = LAOT[md-1];
        for ( mc=Lc; mc<=(Lcd-md); mc++ ) {
            kao0  = LAOT[mc];
            kao1  = kao0 + NNAO[mc];
            kaop0 = LAOT[mc+1];
            add01 = HADD[ma][mb][mc  ][md  ];
            add10 = HADD[ma][mb][mc+1][md-1];
            add00 = HADD[ma][mb][mc  ][md-1];
            for ( iao=iao0; iao<iao1; iao++ ) {
                I01 = add01 + (iao-iao0)*NNAO[mb]*NNAO[mc  ]*NNAO[md  ];
                I10 = add10 + (iao-iao0)*NNAO[mb]*NNAO[mc+1]*NNAO[md-1];
                I00 = add00 + (iao-iao0)*NNAO[mb]*NNAO[mc  ]*NNAO[md-1];
                for ( jao=jao0; jao<jao1; jao++ ) {
                    IJ01 = I01 + (jao-jao0)*NNAO[mc  ]*NNAO[md  ];
                    IJ10 = I10 + (jao-jao0)*NNAO[mc+1]*NNAO[md-1];
                    IJ00 = I00 + (jao-jao0)*NNAO[mc  ]*NNAO[md-1];
                    for ( kao=kao0; kao<kao1; kao++ ) {
                        IJK01 = IJ01 + (kao-kao0)*NNAO[md  ];
                        IJK00 = IJ00 + (kao-kao0)*NNAO[md-1];
                        for ( lao=lao0; lao<lao1; lao++ ) {
                            ix   = INDX[lao];
                            laom = NAM[lao][ix];
                            kaop = NAP[kao][ix];
                            IJKL01 = IJK01 + (lao-lao0);
                            IJKL10 = IJ10
                                   + (kaop-kaop0)*NNAO[md-1] + (laom-laom0);
                            IJKL00 = IJK00 + (laom-laom0);
                            eh[IJKL01] = eh[IJKL10] - DC[ix]*eh[IJKL00];
                        } // for (lao)
                    } // for (kao)
                } // for (jao)
            } // for (iao)
        } // for (mc)
    } // for (md)
    return 0;
}
/* Set the Rys root count and the index strides (INS) used by indx()
 * to address the thread-local 2D integral tables for this quartet. */
static void set_indx( const int mythread,
        const int La, const int Lb, const int Lc, const int Ld ) {
    const int Lab = La + Lb;
    const int Lcd = Lc + Ld;
    const int nroot = ( (Lab+Lcd)>>1 ) + 1;
    NROOTS[mythread] = nroot;
    INS[mythread][0] = 1;              // stride over roots m
    INS[mythread][1] = nroot * (Lcd+1); // stride over bra index N
    INS[mythread][2] = nroot;           // stride over ket index M
}
/* Linear offset of element (m,N,M) within this thread's 2D integral
 * tables, using the strides prepared by set_indx(). */
static int indx( const int mythread,
        const int m, const int N, const int M ) {
    const int *s = INS[mythread];
    return m*s[0] + N*s[1] + M*s[2];
}
/* Accumulate the Rys-root-summed products of the 2D integrals
 * (xint*yint*zint) into the (x s|x s)-class blocks of the eh work
 * array, for every bra/ket angular-momentum class of this quartet.
 * ANGM[ao][0..2] supplies the Cartesian exponents of each AO. */
static void ofmo_form( const int mythread,
        const int La, const int Lb, const int Lc, const int Ld,
        const int nroot, const double *xint, const double *yint,
        const double *zint, int ****HADD, double *eh ) {
    const int Lab = La + Lb;
    const int Lcd = Lc + Ld;
    for ( int mab=La; mab<=Lab; mab++ ) {
        const int iao0 = LAOT[mab];
        const int iao1 = iao0 + NNAO[mab];
        for ( int mcd=Lc; mcd<=Lcd; mcd++ ) {
            const int kao0 = LAOT[mcd];
            const int ncd  = NNAO[mcd];
            const int kao1 = kao0 + ncd;
            double *blk = &eh[ HADD[mab][0][mcd][0] ];
            for ( int iao=iao0; iao<iao1; iao++ ) {
                const int ix = ANGM[iao][0];
                const int iy = ANGM[iao][1];
                const int iz = ANGM[iao][2];
                for ( int kao=kao0; kao<kao1; kao++ ) {
                    const int kx = ANGM[kao][0];
                    const int ky = ANGM[kao][1];
                    const int kz = ANGM[kao][2];
                    const int pos = (iao-iao0)*ncd + (kao-kao0);
                    // sum over Rys roots
                    for ( int m=0; m<nroot; m++ )
                        blk[pos] += xint[ indx(mythread,m,ix,kx) ] *
                                    yint[ indx(mythread,m,iy,ky) ] *
                                    zint[ indx(mythread,m,iz,kz) ];
                }
            }
        }
    }
}
/* Build the 2D integral tables xint/yint/zint(m,N,M) for all Rys
 * roots m via the Rys vertical recurrence relations, from the
 * coefficients F00, B00, B10, B01, C00, CP00 prepared by the caller.
 * N runs over the bra (0..La+Lb), M over the ket (0..Lc+Ld); elements
 * are addressed through indx(mythread,m,N,M).  C00/CP00 are packed as
 * 3 Cartesian components per root (stride m3 = 3*m).  The local
 * arrays of size 13 bound the number of Rys roots. */
static void ofmo_xyzint_v( const int mythread,
        const int La, const int Lb, const int Lc, const int Ld,
        const double *F00, const double *B00, const double *B10,
        const double *B01, const double *C00, const double *CP00,
        const int nroot, double *xint, double *yint, double *zint ) {
    int Lab, Lcd;
    int m, m3, N, M, ix3, ix2, ix1, ix0;
    double C10[13], CP10[13], CP01[13], C01[13];
    Lab = La + Lb;
    Lcd = Lc + Ld;
    // (0,0): base case; the F00 weight is carried on the z table
    for ( m=0; m<nroot; m++ ) {
        ix0 = indx( mythread, m, 0, 0 );
        xint[ix0] = 1.e0;
        yint[ix0] = 1.e0;
        zint[ix0] = F00[m];
    }
    // (1,0)
    if ( Lab > 0 ) {
        for ( m=m3=0; m<nroot; m++, m3+=3 ) {
            ix0 = indx( mythread, m, 1, 0 );
            xint[ix0] = C00[m3+0];
            yint[ix0] = C00[m3+1];
            zint[ix0] = F00[m]*C00[m3+2];
        }
    }
    // (0,1)
    if ( Lcd > 0 ) {
        for ( m=m3=0; m<nroot; m++, m3+=3 ) {
            ix0 = indx( mythread, m, 0, 1 );
            xint[ix0] = CP00[m3+0];
            yint[ix0] = CP00[m3+1];
            zint[ix0] = F00[m] * CP00[m3+2];
        }
    }
    // (1,1) = C'00*(1,0)+B00*(0,0)
    if ( Lab>0 && Lcd>0 ) {
        for ( m=m3=0; m<nroot; m++, m3+=3 ) {
            ix1 = indx( mythread, m, 1, 1 );
            ix0 = indx( mythread, m, 1, 0 );
            xint[ix1] = CP00[m3+0]*xint[ix0] + B00[m];
            yint[ix1] = CP00[m3+1]*yint[ix0] + B00[m];
            zint[ix1] = CP00[m3+2]*zint[ix0] + B00[m]*F00[m];
        }
    }
    // (N,0) and (N,1) (N>=2)
    // (N,0) = C00 *(N-1,0) + (N-1)*B10*(N-2,0)
    // (N,1) = C'00*(N,0)   + N*B00*(N-1,0)
    // C10/CP10 accumulate the (N-1)*B10 and N*B00 factors incrementally.
    if ( Lab > 1 ) {
        for ( m=0; m<nroot; m++ ) {
            C10[m] = 0.e0;
            CP10[m] = B00[m];
        }
        for ( N=2; N<=Lab; N++ ) {
            for ( m=m3=0; m<nroot; m++, m3+=3 ) {
                C10[m] += B10[m];
                ix2 = indx( mythread, m, N  , 0 );
                ix1 = indx( mythread, m, N-1, 0 );
                ix0 = indx( mythread, m, N-2, 0 );
                xint[ix2]=C00[m3+0]*xint[ix1] + C10[m]*xint[ix0];
                yint[ix2]=C00[m3+1]*yint[ix1] + C10[m]*yint[ix0];
                zint[ix2]=C00[m3+2]*zint[ix1] + C10[m]*zint[ix0];
            }
            if ( Lcd>0 ) {
                for ( m=m3=0; m<nroot; m++, m3+=3 ) {
                    CP10[m] += B00[m];
                    ix2 = indx( mythread, m, N  , 1 );
                    ix1 = indx( mythread, m, N  , 0 );
                    ix0 = indx( mythread, m, N-1, 0 );
                    xint[ix2] = CP00[m3+0]*xint[ix1] + CP10[m]*xint[ix0];
                    yint[ix2] = CP00[m3+1]*yint[ix1] + CP10[m]*yint[ix0];
                    zint[ix2] = CP00[m3+2]*zint[ix1] + CP10[m]*zint[ix0];
                }
            }
        }
    } // if (Lab>1)
    // (0,M) and (1,M) (M>=2)
    // (0,M) = C'00*(0,M-1) + (M-1)*B'01*(0,M-2)
    // (1,M) = C00 *(0,M)   + M* B00*(0,M-1)
    if ( Lcd > 1 ) {
        for ( m=0; m<nroot; m++ ) {
            CP01[m] = 0.e0;
            C01[m]  = B00[m];
        }
        for ( M=2; M<=Lcd; M++ ) {
            for ( m=m3=0; m<nroot; m++, m3+=3 ) {
                CP01[m] += B01[m];
                ix2 = indx( mythread, m, 0, M );
                ix1 = indx( mythread, m, 0, M-1 );
                ix0 = indx( mythread, m, 0, M-2 );
                xint[ix2] = CP00[m3+0]*xint[ix1] + CP01[m]*xint[ix0];
                yint[ix2] = CP00[m3+1]*yint[ix1] + CP01[m]*yint[ix0];
                zint[ix2] = CP00[m3+2]*zint[ix1] + CP01[m]*zint[ix0];
            }
            if ( Lab>0 ) {
                for ( m=m3=0; m<nroot; m++, m3+=3 ) {
                    C01[m] += B00[m];
                    ix2 = indx( mythread, m, 1, M );
                    ix1 = indx( mythread, m, 0, M );
                    ix0 = indx( mythread, m, 0, M-1 );
                    xint[ix2] = C00[m3+0]*xint[ix1] + C01[m]*xint[ix0];
                    yint[ix2] = C00[m3+1]*yint[ix1] + C01[m]*yint[ix0];
                    zint[ix2] = C00[m3+2]*zint[ix1] + C01[m]*zint[ix0];
                }
            }
        }
    }
    // (N,M) (N>=2 and M>=2)
    // (N,M) = C00*(N-1,M) + (N-1)*B10*(N-2,M) + M*B00*(N-1,M-1)
    if ( Lab>1 && Lcd>1 ) {
        for ( m=0; m<nroot; m++ ) C01[m] = B00[m];
        for ( M=2; M<=Lcd; M++ ) {
            for ( m=0; m<nroot; m++ ) {
                C01[m] += B00[m];
                C10[m] = B10[m];
            }
            for ( N=2; N<=Lab; N++ ) {
                for ( m=m3=0; m<nroot; m++, m3+=3 ) {
                    ix3 = indx( mythread, m, N  , M );
                    ix2 = indx( mythread, m, N-1, M );
                    ix1 = indx( mythread, m, N-2, M );
                    ix0 = indx( mythread, m, N-1, M-1 );
                    xint[ix3] = C00[m3+0]*xint[ix2]+C10[m]*xint[ix1]
                        +C01[m]*xint[ix0];
                    yint[ix3] = C00[m3+1]*yint[ix2]+C10[m]*yint[ix1]
                        +C01[m]*yint[ix0];
                    zint[ix3] = C00[m3+2]*zint[ix2]+C10[m]*zint[ix1]
                        +C01[m]*zint[ix0];
                    C10[m] += B10[m];
                }
            }
        }
    }
}
/** Compute one contracted ERI block (La Lb | Lc Ld) by Rys quadrature.
 *
 * For every primitive pair combination (ijps x klps) the Rys
 * roots/weights are obtained (calc_root), the recurrence coefficients
 * are assembled, the 2D integrals are built (ofmo_xyzint_v), and the
 * (x s|x s)-class intermediates are accumulated into the thread-local
 * eh buffer (ofmo_form).  After the primitive loops, the HRR
 * (ofmo_hrr_calc) and the normalization pass (ofmo_hrr_coef) produce
 * the final block in DINT[].
 *
 * Fix: the original computed zeta2 = HALF*zeta, eta2 = HALF*eta and
 * rz = rho*zeta inside the hot primitive loops but never read them;
 * these dead computations (and their declarations) are removed.
 *
 * Always returns 0.
 */
static int ofmo_twoint_core_rys_xxxx(
    const int mythread,
    const int *pLa, const int *pLb, const int *pLc, const int *pLd,
    const int *nijps, const double vzeta[], const double vdkab[],
    const double vxiza[], const double BA[3],
    const int *nklps, const double veta[], const double vdkcd[],
    const double vxizc[], const double DC[3], const double AC[3],
    double DINT[] ) {
    int ijps, klps, i;
    double cssss, zeta, dkab, xiza, eta, xizc, dk, T;
    double PA[3], QC[3];
    double PQ2, sqrho, rho, PC[3], QP[3];
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    //
    double C00[13*3], CP00[13*3], B00[13], B10[13], B01[13], F00[13];
    double rrho, rze, W[13], U[13];
    double u2, duminv, dm2inv, dum;
    int m, m3;
    //
    int ****HADD, nroot;
    double *xint, *yint, *zint, *eh;
    // thread-local work areas prepared by the init routines
    HADD = V_HADD[mythread];
    xint = V_XINT[mythread];
    yint = V_YINT[mythread];
    zint = V_ZINT[mythread];
    eh   = V_eh[mythread];
    nroot = NROOTS[mythread];
    ofmo_hrr_clear( La, Lb, Lc, Ld, HADD, eh );
    for ( ijps=0; ijps<(*nijps); ijps++ ) {
        zeta = vzeta[ijps];
        dkab = vdkab[ijps];
        xiza = vxiza[ijps];
        for ( i=0; i<3; i++ ) {
            PC[i] = AC[i] + xiza*BA[i];
            PA[i] = xiza * BA[i];
        }
        for ( klps=0; klps<(*nklps); klps++ ) {
            eta  = veta[klps];
            dk   = dkab * vdkcd[klps];
            xizc = vxizc[klps];
            PQ2  = ZERO;
            for ( i=0; i<3; i++ ) {
                QC[i] = xizc*DC[i];
                QP[i] = xizc*DC[i] - PC[i];
                PQ2 += QP[i]*QP[i];
            }
            rrho  = zeta + eta;
            rze   = zeta * eta;
            sqrho = sqrt(1.e0/rrho);
            rho   = sqrho * sqrho;
            T     = rho * PQ2;            // Boys/Rys argument
            cssss = sqrho * dk;           // (ss|ss) prefactor
            calc_root( nroot, T, U, W );
            // recurrence coefficients per Rys root
            for ( m=m3=0; m<nroot; m++, m3+=3 ) {
                u2 = rho * U[m];
                F00[m] = cssss * W[m];
                duminv = 1.e0 / ( 1.e0 + rrho * u2 );
                dm2inv = 0.5e0 * duminv;
                B00[m] = dm2inv * rze * u2;
                B10[m] = dm2inv * ( zeta + rze*u2 );
                B01[m] = dm2inv * ( eta  + rze*u2 );
                dum = zeta * u2 * duminv;
                for ( i=0; i<3; i++ ) C00[m3+i] = PA[i] + dum * QP[i];
                dum = eta * u2 * duminv;
                for ( i=0; i<3; i++ ) CP00[m3+i] = QC[i] - dum * QP[i];
            }
            ofmo_xyzint_v( mythread, La, Lb, Lc, Ld,
                    F00, B00, B10, B01, C00, CP00,
                    nroot, xint, yint, zint );
            ofmo_form( mythread, La, Lb, Lc, Ld,
                    nroot, xint, yint, zint, HADD, eh );
        }
    }
    ofmo_hrr_calc( La, Lb, Lc, Ld, BA, DC, HADD, eh );
    ofmo_hrr_coef( La, Lb, Lc, Ld, DINT, HADD, eh );
    return 0;
}
// Contracted two-electron integral routine (Rys quadrature, general
// angular momenta) for partially-direct SCF.  Each worker walks its
// share of ij shell pairs (stride nworkers), applies Schwarz screening
// to each (ij|kl) combination, computes the contracted block via
// ofmo_twoint_core_rys_xxxx, and appends the surviving integrals with
// their canonical-order coefficients into ebuf_val/ebuf_ind4.
// Returns OFMO_EBUF_FULL (recording the resume position in
// last_ijcs/last_klcs) when the buffer cannot hold another block,
// otherwise OFMO_EBUF_NOFULL.
int ofmo_twoint_rys_xxxx(
    const int *pnworkers, const int *pworkerid,
    const int *pLa, const int *pLb, const int *pLc, const int *pLd,
    const int shel_atm[], const int shel_ini[],
    const double atom_x[], const double atom_y[],
    const double atom_z[], const int leading_cs_pair[],
    const double csp_schwarz[],
    const int csp_ics[], const int csp_jcs[],
    const int csp_leading_ps_pair[],
    const double psp_zeta[], const double psp_dkps[],
    const double psp_xiza[],
    // for partially direct SCF
    const long *pebuf_max_nzeri, long *ebuf_non_zero_eri,
    double ebuf_val[], short int ebuf_ind4[],
    int *last_ijcs, int *last_klcs ) {
    int Lab, Lcd, i, j, k, l, ipat, ix;
    int I2, IJ, K2, KL;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1, max_klcs ;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe, coe0;
    double *DINTEG;
    long nzeri, max_nzeri, nzeri4;
    //
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    long ebuf_max_nzeri = *pebuf_max_nzeri;
    //
    int na, nb, nc, nd;
    //
    int mythread;
    mythread = omp_get_thread_num();
    // prepare thread-local address tables and 2D-integral strides
    ofmo_hrr_make_add( mythread, La, Lb, Lc, Ld );
    set_indx( mythread, La, Lb, Lc, Ld );
    DINTEG = DINTEG_MASTER[mythread];
    na = NNAO[La];
    nb = NNAO[Lb];
    nc = NNAO[Lc];
    nd = NNAO[Ld];
    // composite shell-pair types (triangular packing)
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs0 = leading_cs_pair[Lab];
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    nzeri = *ebuf_non_zero_eri;
    // keep headroom for one full block so a block is never split
    max_nzeri = ebuf_max_nzeri - na*nb*nc*nd;
    nzeri4 = nzeri*4;
    if ( nzeri >= max_nzeri ) {
        *last_ijcs = ijcs0+workerid;
        *last_klcs = klcs0 - 1;
        *ebuf_non_zero_eri = nzeri;
        return OFMO_EBUF_FULL;
    }
    for ( ijcs=ijcs0+workerid; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics = csp_ics[ijcs];
        jcs = csp_jcs[ijcs];
        ijps0 = csp_leading_ps_pair[ijcs];
        nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat = shel_atm[ics];
        jat = shel_atm[jcs];
        iao0 = shel_ini[ics];
        jao0 = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        // same pair type: only the lower triangle of (ij|kl) pairs
        max_klcs = ( Lab == Lcd ? ijcs+1 : klcs1 );
        for ( klcs=klcs0; klcs<max_klcs; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            // Schwarz screening of the whole quartet
            if ( val_ab*val_cd < EPS_PS4 ) continue;
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat = shel_atm[kcs];
            lat = shel_atm[lcs];
            kao0 = shel_ini[kcs];
            lao0 = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            ofmo_twoint_core_rys_xxxx( mythread,
                    &La, &Lb, &Lc, &Ld,
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DC, AC, DINTEG );
            // ipat: also store the (kl|ij)-swapped entry when needed
            ipat = ((Lab != Lcd) || (ics==kcs && jcs>lcs) ? true : false);
#ifdef SORT_CSP
            int ijgekl = (ics>kcs);
            if (ics==kcs) ijgekl = (jcs>=lcs);
            if (!ijgekl) ipat = ( (ics==kcs && jcs<lcs) ? true : false);
#endif
            for ( i=0, iao=iao0, ix=0; i<na; i++, iao++ ) {
                I2 = (iao*iao+iao)>>1;
                for ( j=0, jao=jao0; j<nb; j++, jao++ ) {
                    if ( jao>iao ) { ix+=nc*nd; continue; }
                    IJ = I2 + jao;
                    // duplicate-index weights for canonical storage
                    coe0 = ( iao==jao ? HALF : ONE );
                    for ( k=0, kao=kao0; k<nc; k++, kao++ ) {
                        K2 = (kao*kao+kao)>>1;
                        for ( l=0, lao=lao0; l<nd; l++, lao++, ix++ ) {
                            if ( lao>kao ) continue;
                            if ( fabs(DINTEG[ix]) > EPS_ERI ) {
                                KL = K2 + lao;
#ifndef SORT_CSP
                                if ( IJ >= KL ) {
#else
                                if ((ijgekl&&IJ>=KL) || (!ijgekl&&KL>=IJ)) {
#endif
                                    coe = coe0;
                                    if ( kao==lao ) coe *= HALF;
                                    if ( KL == IJ ) coe *= HALF;
                                    ebuf_val[nzeri] = coe*DINTEG[ix];
                                    ebuf_ind4[nzeri4+0] = (short int)iao;
                                    ebuf_ind4[nzeri4+1] = (short int)jao;
                                    ebuf_ind4[nzeri4+2] = (short int)kao;
                                    ebuf_ind4[nzeri4+3] = (short int)lao;
                                    nzeri++;
                                    nzeri4+=4;
                                } else if ( ipat ) {
                                    coe = coe0;
                                    if ( kao==lao ) coe*=HALF;
                                    ebuf_val[nzeri] = coe*DINTEG[ix];
                                    ebuf_ind4[nzeri4+0] = (short int)kao;
                                    ebuf_ind4[nzeri4+1] = (short int)lao;
                                    ebuf_ind4[nzeri4+2] = (short int)iao;
                                    ebuf_ind4[nzeri4+3] = (short int)jao;
                                    nzeri++;
                                    nzeri4+=4;
                                }
                            }
                        } // l
                    } // k
                } // j
            } // i
            // buffer exhausted: record resume point and bail out
            if ( nzeri >= max_nzeri ) {
                *last_ijcs = ijcs;
                *last_klcs = klcs;
                *ebuf_non_zero_eri = nzeri;
                return OFMO_EBUF_FULL;
            }
        } // for ( klcs );
    } // for ( ijcs );
    *ebuf_non_zero_eri = nzeri;
    return OFMO_EBUF_NOFULL;
}
//
// Contracted two-electron integral routine (Rys quadrature) for fully
// direct SCF.  Structure mirrors ofmo_twoint_rys_xxxx, with two
// differences: it can resume from the position (last_ijcs,last_klcs)
// where the buffered pass ran out of space, and when the temporary
// buffer fills it immediately folds the accumulated integrals into
// the G (Fock) matrix via ofmo_integ_add_fock instead of returning.
// Always returns 0.
int ofmo_twoint_direct_rys_xxxx(
    const int *pnworkers, const int *pworkerid,
    const int *pLa, const int *pLb, const int *pLc, const int *pLd,
    const int shel_atm[], const int shel_ini[],
    const double atom_x[], const double atom_y[],
    const double atom_z[], const int leading_cs_pair[],
    const double csp_schwarz[],
    const int csp_ics[], const int csp_jcs[],
    const int csp_leading_ps_pair[],
    const double psp_zeta[], const double psp_dkps[],
    const double psp_xiza[],
    // for direct SCF
    const long *petmp_max_nzeri, long *petmp_non_zero_eri,
    double etmp_val[], short int etmp_ind4[],
    const int *plast_ijcs, const int *plast_klcs,
    // density matrix & G-matrix data
    const int *pnao, const double Ds[], double G[] ) {
    int nworkers=*pnworkers, workerid=*pworkerid;
    int La=*pLa, Lb=*pLb, Lc=*pLc, Ld=*pLd;
    int last_ijcs=*plast_ijcs, last_klcs=*plast_klcs, nao=*pnao;
    long max_nzeri=*petmp_max_nzeri;
    long nzeri4, nzeri=*petmp_non_zero_eri;
    //
    int Lab, Lcd, i, j, k, l, ipat, ix;
    int I2, IJ, K2, KL;
    int ijcs, ijcs0, ijcs1;
    int klcs, klcs0, klcs1, max_klcs ;
    int ijps0, nijps, klps0, nklps;
    int ics, iat, iao, iao0, jcs, jat, jao, jao0;
    int kcs, kat, kao, kao0, lcs, lat, lao, lao0;
    double A[3], B[3], C[3], D[3], BA[3], DC[3], AC[3];
    double val_ab, val_cd, coe, coe0;
    double *DINTEG;
    //
    int na, nb, nc, nd;
    //
    int mythread;
    // dynamically adjusted screening thresholds
    float eps_eri = ofmo_twoint_eps_eri(0);
    float eps_ps4 = ofmo_twoint_eps_ps4(0);
    float eps_sch = ofmo_twoint_eps_sch(0);
    mythread = omp_get_thread_num();
    ofmo_hrr_make_add( mythread, La, Lb, Lc, Ld );
    set_indx( mythread, La, Lb, Lc, Ld );
    DINTEG = DINTEG_MASTER[mythread];
    na = NNAO[La];
    nb = NNAO[Lb];
    nc = NNAO[Lc];
    nd = NNAO[Ld];
    Lab = La*(La+1)/2+Lb;
    Lcd = Lc*(Lc+1)/2+Ld;
    ijcs1 = leading_cs_pair[Lab+1];
    klcs0 = leading_cs_pair[Lcd];
    klcs1 = leading_cs_pair[Lcd+1];
    // resume after the buffered pass, or start this worker's stride
    if ( last_ijcs != -1 ) {
        ijcs = last_ijcs;
        klcs = last_klcs+1;
    } else {
        ijcs = leading_cs_pair[Lab] + workerid;
        klcs = klcs0;
    }
    // keep headroom for one full block
    max_nzeri -= na*nb*nc*nd;
    nzeri4 = nzeri*4;
    if ( nzeri >= max_nzeri ) {
        // flush pending integrals into the Fock matrix
        ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4, Ds, G );
        nzeri = nzeri4 = 0;
    }
    for ( ; ijcs<ijcs1; ijcs+=nworkers ) {
        val_ab = csp_schwarz[ijcs];
        ics = csp_ics[ijcs];
        jcs = csp_jcs[ijcs];
        ijps0 = csp_leading_ps_pair[ijcs];
        nijps = csp_leading_ps_pair[ijcs+1]-ijps0;
        iat = shel_atm[ics];
        jat = shel_atm[jcs];
        iao0 = shel_ini[ics];
        jao0 = shel_ini[jcs];
        A[0]=atom_x[iat]; A[1]=atom_y[iat]; A[2]=atom_z[iat];
        B[0]=atom_x[jat]; B[1]=atom_y[jat]; B[2]=atom_z[jat];
        for ( i=0; i<3; i++ ) BA[i] = B[i] - A[i];
        max_klcs = ( Lab == Lcd ? ijcs+1 : klcs1 );
        for ( ; klcs<max_klcs; klcs++ ) {
            val_cd = csp_schwarz[klcs];
            // Schwarz screening
            if ( val_ab*val_cd < eps_ps4 ) continue;
            kcs = csp_ics[klcs];
            lcs = csp_jcs[klcs];
            // additional density-weighted screening
            if ( val_ab*val_cd*ofmo_twoint_dmax6(ics,jcs,kcs,lcs) < eps_sch ) continue;
            klps0 = csp_leading_ps_pair[klcs];
            nklps = csp_leading_ps_pair[klcs+1]-klps0;
            kat = shel_atm[kcs];
            lat = shel_atm[lcs];
            kao0 = shel_ini[kcs];
            lao0 = shel_ini[lcs];
            C[0]=atom_x[kat]; C[1]=atom_y[kat]; C[2]=atom_z[kat];
            D[0]=atom_x[lat]; D[1]=atom_y[lat]; D[2]=atom_z[lat];
            for ( i=0; i<3; i++ ) {
                AC[i] = A[i] - C[i];
                DC[i] = D[i] - C[i];
            }
            ofmo_twoint_core_rys_xxxx( mythread,
                    &La, &Lb, &Lc, &Ld,
                    &nijps, &psp_zeta[ijps0], &psp_dkps[ijps0],
                    &psp_xiza[ijps0], BA,
                    &nklps, &psp_zeta[klps0], &psp_dkps[klps0],
                    &psp_xiza[klps0], DC, AC, DINTEG );
            // ipat: also store the (kl|ij)-swapped entry when needed
            ipat = ((Lab != Lcd) || (ics==kcs && jcs>lcs) ? true : false);
#ifdef SORT_CSP
            int ijgekl = (ics>kcs);
            if (ics==kcs) ijgekl = (jcs>=lcs);
            if (!ijgekl) ipat = ( (ics==kcs && jcs<lcs) ? true : false);
#endif
            for ( i=0, iao=iao0, ix=0; i<na; i++, iao++ ) {
                I2 = (iao*iao+iao)>>1;
                for ( j=0, jao=jao0; j<nb; j++, jao++ ) {
                    if ( jao>iao ) { ix+=nc*nd; continue; }
                    IJ = I2 + jao;
                    coe0 = ( iao==jao ? HALF : ONE );
                    for ( k=0, kao=kao0; k<nc; k++, kao++ ) {
                        K2 = (kao*kao+kao)>>1;
                        for ( l=0, lao=lao0; l<nd; l++, lao++, ix++ ) {
                            if ( lao>kao ) continue;
                            if ( fabs(DINTEG[ix]) > eps_eri ) {
                                KL = K2 + lao;
#ifndef SORT_CSP
                                if ( IJ >= KL ) {
#else
                                if ((ijgekl&&IJ>=KL) || (!ijgekl&&KL>=IJ)) {
#endif
                                    coe = coe0;
                                    if ( kao==lao ) coe *= HALF;
                                    if ( KL == IJ ) coe *= HALF;
                                    etmp_val[nzeri] = coe*DINTEG[ix];
                                    etmp_ind4[nzeri4+0] = (short int)iao;
                                    etmp_ind4[nzeri4+1] = (short int)jao;
                                    etmp_ind4[nzeri4+2] = (short int)kao;
                                    etmp_ind4[nzeri4+3] = (short int)lao;
                                    nzeri++;
                                    nzeri4+=4;
                                } else if ( ipat ) {
                                    coe = coe0;
                                    if ( kao==lao ) coe*=HALF;
                                    etmp_val[nzeri] = coe*DINTEG[ix];
                                    etmp_ind4[nzeri4+0] = (short int)kao;
                                    etmp_ind4[nzeri4+1] = (short int)lao;
                                    etmp_ind4[nzeri4+2] = (short int)iao;
                                    etmp_ind4[nzeri4+3] = (short int)jao;
                                    nzeri++;
                                    nzeri4+=4;
                                }
                            }
                        } // l
                    } // k
                } // j
            } // i
            // buffer full: fold into G and keep going
            if ( nzeri >= max_nzeri ) {
                ofmo_integ_add_fock( nao, nzeri, etmp_val, etmp_ind4,
                        Ds, G );
                nzeri = nzeri4= 0;
            }
        } // for ( klcs );
        klcs = klcs0;
    } // for ( ijcs );
    *petmp_non_zero_eri = nzeri;
    return 0;
}
|
gradientCheck.h | #include <iostream>
#include <list>
#include <ctime>
#include <cstdio>
#include <tclap/CmdLine.h>
#include <boost/algorithm/string/join.hpp>
#include "param.h"
#include "neuralClasses.h"
//#include "graphClasses.h"
#include "util.h"
//#include "RBMDahlFunctions.h"
#include "log_add.h"
#include <cmath>
#include <stdlib.h>
typedef Node <Word_embeddings> word_node;
typedef Node <Context_matrix> context_node;
typedef Node <Hidden_layer> hidden_node;
//#include "lossFunctions.h"
//#include<tr1/random>
#include <time.h>
//#include <chrono>
//#include <random>
#include <Eigen/Dense>
#include <Eigen/Core>
#include <stdio.h>
#include <iomanip>
#include <boost/random/uniform_real_distribution.hpp>
#include <boost/random/uniform_int_distribution.hpp>
#include <boost/random/mersenne_twister.hpp>
#include "maybe_omp.h"
#include <math.h>
#include <boost/unordered_map.hpp>
#include <boost/functional.hpp>
#include <stdlib.h>
typedef boost::unordered_map<vector<int>, double> vector_map;
typedef boost::unordered_map<int,vector_map > thread_vector_map;
typedef Eigen::Matrix<double,Dynamic,Dynamic> RealMatrix;
using namespace std;
using namespace Eigen;
using namespace boost::random;
void inline fPropGradCheck(param & ,int ,int ,vector<word_node > &,vector<context_node > &,hidden_node &,context_node &,vector<Matrix<int,Dynamic,1> >&);
double computeLossFunction(param & ,int ,int ,vector<word_node > &,
vector<context_node > &,hidden_node &,context_node & ,
vector<Matrix<int,Dynamic,1> >&,vector_map & ,Matrix<double,Dynamic,Dynamic> & ,
Matrix<int,Dynamic,Dynamic> & ,vector<vector<double> > &,Output_word_embeddings & );
void initZero(param & ,vector<word_node > &,vector<context_node > &,hidden_node &,
context_node & );
void gradientChecking(param & myParam,int minibatch_start_index,int current_minibatch_size,vector<word_node > &word_nodes,
vector<context_node > &context_nodes,hidden_node &hidden_layer_node,context_node & hidden_layer_to_output_node,
vector<Matrix<int,Dynamic,1> >&shuffled_training_data,vector_map &c_h,vector<uniform_real_distribution<> >& unif_real_vector,
vector<mt19937> & eng_real_vector,vector<uniform_int_distribution<> > & unif_int_vector,vector<mt19937> & eng_int_vector,
vector<vector<double> > &unigram_probs_vector,vector<vector<double> > & q_vector,vector<vector<int> >&J_vector,Output_word_embeddings & D_prime)
{
double delta_perturb = 0.000005;
std::setprecision(20);
//creating the gradient matrices
RealMatrix gradient_input_W;
RealMatrix gradient_output_W;
Matrix<double,Dynamic,1>gradient_output_b;
Matrix<double,Dynamic,1>gradient_h_bias;
RealMatrix gradient_hidden_to_output_matrix;
vector<RealMatrix> gradients_context_matrix;
int ngram_size = myParam.ngram_size;
int n_hidden = myParam.n_hidden;
int n_vocab = myParam.n_vocab;
int embedding_dimension = myParam.embedding_dimension;
int minibatch_size = myParam.minibatch_size;
int num_noise_samples = myParam.num_noise_samples;
double normalization_init = myParam.normalization_init;
gradient_input_W.setZero(myParam.n_vocab,myParam.embedding_dimension);
gradient_output_W.setZero(myParam.n_vocab,myParam.embedding_dimension);
gradient_output_b.setZero(myParam.n_vocab);
gradient_h_bias.setZero(myParam.n_hidden);
gradient_hidden_to_output_matrix.setZero(myParam.embedding_dimension,myParam.n_hidden);
for (int word = 0;word<myParam.ngram_size-1;word++)
{
RealMatrix context_matrix_gradient;
context_matrix_gradient.setZero(myParam.n_hidden,myParam.embedding_dimension);
gradients_context_matrix.push_back(context_matrix_gradient);
}
///////////////////FORWARD PROPAGATION//////////////////////////
////////////////////////////////////////////////////////////////
fPropGradCheck(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node,shuffled_training_data);
/////////////////COMPUTING THE NCE LOSS FUNCTION//////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////
//computing the loss function
//now computing the loss function
//Matrix<double,Dynamic,Dynamic> output_gradient;
Matrix<double,Dynamic,Dynamic> minibatch_predicted_embeddings(myParam.embedding_dimension,current_minibatch_size);
Matrix<double,Dynamic,1> minibatch_positive_weights(current_minibatch_size);
Matrix<double,Dynamic,Dynamic> minibatch_negative_weights(current_minibatch_size,myParam.num_noise_samples);
Matrix<int,Dynamic,Dynamic> minibatch_negative_samples(current_minibatch_size,myParam.num_noise_samples);
//int thread_id = 0; //for now, this is a proxy. When I'm multithreading this code, the thread ID will change
//creating the unordered map for each thread
//thread_vector_map c_h_gradient_vector;
vector<vector_map> c_h_gradient_vector;
vector_map c_h_gradient;
for (int thread_id =0 ;thread_id<myParam.n_threads;thread_id++)
{
vector_map temp;
c_h_gradient_vector.push_back(temp);
}
clock_t t;
t = clock();
//c_h_gradient_vector += minibatch_positive_weights(train_id)
//parallelizing the creation with multithreading
Eigen::initParallel();
Eigen::setNbThreads(1);
cout<<"staring the fprop"<<endl;
#pragma omp parallel firstprivate(current_minibatch_size,minibatch_start_index,ngram_size,n_vocab,embedding_dimension, \
num_noise_samples,normalization_init)
{
#pragma omp for //schedule(dynamic)
for (int train_id = 0;train_id < minibatch_size;train_id++)
{
int thread_id = omp_get_thread_num();
int output_word = shuffled_training_data[ngram_size-1](minibatch_start_index+train_id);
//cout<<"output word is "<<output_word<<endl;
Matrix<double,Dynamic,1> predicted_embedding = hidden_layer_to_output_node.fProp_matrix.col(train_id);
vector<int> context;//(ngram_size-1);
//creating the context
for (int word = 0;word<ngram_size-1;word++)
{
//cout<<"train id is "<<train_id<<endl;
//cout<<"minibatch start index is "<<minibatch_start_index<<endl;
context.push_back(shuffled_training_data[word](minibatch_start_index+train_id));
cout<<"word "<<word<<" in context is "<<shuffled_training_data[word](minibatch_start_index+train_id)<<endl;
//context.push_back((*thread_data_col_locations[thread_id][word])(minibatch_start_index+train_id));
}
double log_inv_normalization_const_h = 0.;
//getting a normalization constant and making it threasafe
//this region does not need to be critical because its just a read
log_inv_normalization_const_h = c_h[context];
double inv_normalization_const_h = exp(log_inv_normalization_const_h);
//cout<<"The normalization constant is "<<inv_normalization_const_h<<endl;
//setting the gradient for that context to 0;
//double c_h_gradient_vector = 0.0;
minibatch_predicted_embeddings.col(train_id) = predicted_embedding;
double score = D_prime.W.row(output_word).dot(predicted_embedding) + D_prime.b(output_word);
double unnorm_positive_prob = exp(score);
minibatch_positive_weights(train_id) = num_noise_samples*unigram_probs_vector[thread_id][output_word]/
(unnorm_positive_prob*inv_normalization_const_h + num_noise_samples * unigram_probs_vector[thread_id][output_word]) ;
if (c_h_gradient_vector[thread_id].find(context) == c_h_gradient_vector[thread_id].end())
{
c_h_gradient_vector[thread_id][context] = minibatch_positive_weights(train_id);
}
else
{
//cout<<"we got a repeat!"<<endl;
c_h_gradient_vector[thread_id][context] += minibatch_positive_weights(train_id);
}
///COMPUTING NOISE SAMPLES///
for (int sample_id = 0;sample_id <num_noise_samples;sample_id++)
{
int mixture_component = unif_int_vector[thread_id](eng_int_vector[thread_id]);
//cout<<"mixture component was "<<mixture_component<<endl;
double p = unif_real_vector[thread_id](eng_real_vector[thread_id]);
int sample ;
//cout<<"computing sample"<<endl;
//cout<<"remaining bernoulli item is "<<J_vector[thread_id][mixture_component]<<endl;
if (q_vector[thread_id][mixture_component] >= p)
{
//cout<<"mixture accepted"<<endl;
sample = mixture_component;
}
else
{
//cout<<"J accepted "<<endl;
sample = J_vector[thread_id][mixture_component];
}
//vector<int> context(ngram_size-1);
//cout<<"the sample was "<<sample<<endl;
assert (sample >= 0);
minibatch_negative_samples(train_id,sample_id) = sample;
double negative_score = D_prime.W.row(sample).dot(predicted_embedding) + D_prime.b(sample);
double negative_unnorm_prob = exp(negative_score);
minibatch_negative_weights(train_id,sample_id) = negative_unnorm_prob*inv_normalization_const_h/
(negative_unnorm_prob*inv_normalization_const_h + num_noise_samples * unigram_probs_vector[thread_id][sample]);
c_h_gradient_vector[thread_id][context] -= minibatch_negative_weights(train_id,sample_id);
}
}
}
#pragma omp barrier
/////////////////////////////////UPDATING GRADIENTS AND DOING BACKPROPAGATION/////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////
//t = clock();
//updating the normalization constants
for (int thread_id=0;thread_id<myParam.n_threads;thread_id++)
{
vector_map::iterator it;
for (it = c_h_gradient_vector[thread_id].begin();it != c_h_gradient_vector[thread_id].end();it++)
{
if (c_h_gradient.find((*it).first) == c_h_gradient.end())
{
c_h_gradient[(*it).first] = (*it).second;
}
else
{
c_h_gradient[(*it).first] += (*it).second;
}
}
}
//cout<<"the time taken to update normalization constants was "<<clock()-t<<endl;
//t = clock();
//first comput the backprop gradient
Matrix<double,Dynamic,Dynamic> context_bProp_matrix;//(myParam.embedding_dimension,current_minibatch_size);
context_bProp_matrix.setZero(myParam.embedding_dimension,current_minibatch_size);
D_prime.bProp(shuffled_training_data[myParam.ngram_size-1],minibatch_positive_weights,
minibatch_negative_samples,minibatch_negative_weights,
context_bProp_matrix,minibatch_start_index,current_minibatch_size,myParam.num_noise_samples);
//cout<<"the time taken to do bprop on the output layer was "<<clock()-t<<endl;
//now then update the parameters
//t = clock();
D_prime.computeGradientCheck(minibatch_predicted_embeddings,shuffled_training_data[myParam.ngram_size-1],minibatch_positive_weights,
minibatch_negative_samples,minibatch_negative_weights,
minibatch_start_index,current_minibatch_size,myParam.num_noise_samples,gradient_output_W,gradient_output_b);
//cout<<"the time taken to compute the gradient on the output layer was "<<clock()-t<<endl;
//now doing backprop on hidden layer to output matrix
hidden_layer_to_output_node.param->bProp(context_bProp_matrix,hidden_layer_to_output_node.bProp_matrix);
hidden_layer_to_output_node.param->computeGradientCheckOmp(context_bProp_matrix,hidden_layer_node.fProp_matrix,gradient_hidden_to_output_matrix);
//now doing backprop on the hidden node
hidden_layer_node.param->bPropTanh(hidden_layer_to_output_node.bProp_matrix,hidden_layer_node.bProp_matrix,hidden_layer_node.fProp_matrix);
hidden_layer_node.param->computeGradientCheckTanh(hidden_layer_to_output_node.bProp_matrix,hidden_layer_node.fProp_matrix,gradient_h_bias);
//now doing backprop on the context matrices
for (int word = 0;word<myParam.ngram_size-1;word++)
{
context_nodes[word].param->bProp(hidden_layer_node.bProp_matrix,context_nodes[word].bProp_matrix);
//updating the context weights
context_nodes[word].param->computeGradientCheckOmp(hidden_layer_node.bProp_matrix,word_nodes[word].fProp_matrix,gradients_context_matrix[word]);
}
//doing backprop on the word embeddings
for (int word = 0;word < myParam.ngram_size-1;word++)
{
//cout<<"the backprop matrix from the context "<<word<<" before doing word updates is "<<context_nodes[word].bProp_matrix<<endl;
//getchar();
word_nodes[word].param->computeGradientCheck(context_nodes[word].bProp_matrix,shuffled_training_data[word],minibatch_start_index,
current_minibatch_size,gradient_input_W);
}
//compute the NCE LOSS FUNTION
double current_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,
hidden_layer_to_output_node,shuffled_training_data,c_h,minibatch_predicted_embeddings,minibatch_negative_samples,
unigram_probs_vector,D_prime);
cout<<"the current nce loss is "<<setprecision(10)<<current_nce_loss<<endl;
//for all the nodes in the graph, I have to set the bprop and fprop matrices to zero
/*
hidden_layer_node.bProp_matrix.setZero();
hidden_layer_node.fProp_matrix.setZero();
hidden_layer_to_output_node.fProp_matrix.setZero();
hidden_layer_to_output_node.bProp_matrix.setZero();
for (int word = 0;word<myParam.ngram_size-1;word++)
{
context_nodes[word].fProp_matrix.setZero();
context_nodes[word].bProp_matrix.setZero();
word_nodes[word].fProp_matrix.setZero();
//word_nodes[word].bProp_matrix.setZero();
}
*/
//initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node);
//now that we have the gradients, we check our gradients using finite differences
////COMPUTING THE LOSS FUNCTION////////////////////////////
//randomly pick up some parameters whose gradient you want to inspect
//first pick some random examples from the minibatch
//srand (time(NULL));
//cout<<"the current minibatch size is "<<current_minibatch_size<<endl;
//cout<<"max is "<<min(4,current_minibatch_size)<<endl;
getchar();
for (int example = 0;example <min(4,current_minibatch_size) ;example++)
{
//checking the gradient of the normalization constant
cout<<"the example is "<<example<<endl;
vector<int> context;//(ngram_size-1);
//creating the context
for (int word = 0;word<ngram_size-1;word++)
{
context.push_back(shuffled_training_data[word](minibatch_start_index+example));
cout<<"context word "<<word<<" is "<<shuffled_training_data[word](minibatch_start_index+example)<<endl;
//context.push_back((*thread_data_col_locations[thread_id][word])(minibatch_start_index+train_id));
}
c_h[context] +=delta_perturb;
double perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,
hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples,
unigram_probs_vector,D_prime);
double finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb;
double difference = finite_difference_gradient - c_h_gradient[context];
cout<<"the original gradient is "<<c_h_gradient[context]<<endl;
cout<<"the finite difference gradient is "<<finite_difference_gradient<<endl;
cout<<"the ratio is "<<c_h_gradient[context]/finite_difference_gradient<<endl;
cout<<"the difference for c_h was "<<abs(difference)<<endl;
c_h[context] -= delta_perturb;
getchar();
//checking the gradient of the hidden layer to output node context matrix
initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node);
int row_perturb_dimension = rand() % n_hidden;
cout<<"the row perturb dimension was "<<row_perturb_dimension<<endl;
int col_perturb_dimension = rand() % embedding_dimension;
cout<<"the col perturb dimension was "<<col_perturb_dimension<<endl;
//first perturb
//cout<<"before perturbation the dimension was "<< context_nodes[word].param->U(row_perturb_dimension,col_perturb_dimension)<<endl;
hidden_layer_to_output_node.param->U(row_perturb_dimension,col_perturb_dimension) += delta_perturb;
//cout<<"after perturbation the dimension was "<< context_nodes[word].param->U(row_perturb_dimension,col_perturb_dimension)<<endl;
//then do fprop
fPropGradCheck(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node,shuffled_training_data);
//then compute NCE loss function
perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,
hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples,
unigram_probs_vector,D_prime);
finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb;
difference = finite_difference_gradient - gradient_hidden_to_output_matrix(row_perturb_dimension,col_perturb_dimension);
cout<<"the ratio is "<<gradient_hidden_to_output_matrix(row_perturb_dimension,col_perturb_dimension)/finite_difference_gradient<<endl;
cout<<"the difference for hidden to output context matrix was "<<abs(difference)<<endl;
hidden_layer_to_output_node.param->U(row_perturb_dimension,col_perturb_dimension) -= delta_perturb;
getchar();
//restoring the fprop to the original one
initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node);
fPropGradCheck(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node,shuffled_training_data);
int example_id = rand() % current_minibatch_size;
//now pick that example , perturb, do fprop and check the gradient
Matrix<double,1,Dynamic> perturb_vector(embedding_dimension);
int perturb_dimension = rand() % embedding_dimension;
cout<<"the perturb dimension was "<<perturb_dimension<<endl;
int output_word = shuffled_training_data[myParam.ngram_size-1](minibatch_start_index+example_id);
cout<<"the output word was "<<output_word<<endl;
D_prime.W(output_word,perturb_dimension) += delta_perturb;
perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,
hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples,
unigram_probs_vector,D_prime);
//cout<<"the perturbed nce loss was "<<perturbed_nce_loss<<endl;
//cout<<"the finite difference gradient of the perturb dimension "<<perturb_dimension<<" was "<<(perturbed_nce_loss-current_nce_loss)/delta_perturb<<endl;
finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb;
difference = finite_difference_gradient - gradient_output_W(output_word,perturb_dimension);
cout<<"the ratio is "<<gradient_output_W(output_word,perturb_dimension)/finite_difference_gradient<<endl;
cout<<"the difference for output W was "<<abs(difference)<<endl;
D_prime.W(output_word,perturb_dimension) -= delta_perturb;
getchar();
//now checking the gradient for output bias
D_prime.b(output_word) += delta_perturb;
perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,
hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples,
unigram_probs_vector,D_prime);
finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb;
difference = finite_difference_gradient - gradient_output_b(output_word);
cout<<"the ratio is "<<gradient_output_b(output_word)/finite_difference_gradient<<endl;
cout<<"the difference for output b was "<<abs(difference)<<endl;
D_prime.b(output_word) -= delta_perturb;
getchar();
//checking the gradient for one of the words in the noise samples
int noise_word_id = rand()%num_noise_samples;
cout<<"the noise word id was "<<noise_word_id<<endl;
int noise_word = minibatch_negative_samples(example_id,noise_word_id);
cout<<"the noise word was "<<noise_word<<endl;
D_prime.W(noise_word,perturb_dimension) += delta_perturb;
perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,
hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples,
unigram_probs_vector,D_prime);
finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb;
difference = finite_difference_gradient - gradient_output_W(noise_word,perturb_dimension);
cout<<"the ratio is "<<gradient_output_W(noise_word,perturb_dimension)/finite_difference_gradient<<endl;
cout<<"the difference for output noise W was "<<abs(difference)<<endl;
D_prime.W(noise_word,perturb_dimension) -= delta_perturb;
getchar();
//now checking the gradient for hbias
//cout<<"gradient h bias is "<<gradient_h_bias<<endl;
initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node);
perturb_dimension = rand() % n_hidden;
//cout<<"the perturb dimension is "<<perturb_dimension<<endl;
//out<<"h bias before perturbing is "<<hidden_layer_node.param->h_bias<<endl;
hidden_layer_node.param->h_bias(perturb_dimension) += delta_perturb;
//cout<<"h bias after perturbing is "<<hidden_layer_node.param->h_bias<<endl;
fPropGradCheck(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node,shuffled_training_data);
perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,
hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples,
unigram_probs_vector,D_prime);
cout<<"the perturbed loss was "<<perturbed_nce_loss<<endl;
finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/ delta_perturb;
cout<<"the finited difference gradient for h bias was "<<finite_difference_gradient<<endl;
difference = finite_difference_gradient -gradient_h_bias(perturb_dimension);
cout<<"the ratio is "<<gradient_h_bias(perturb_dimension)/finite_difference_gradient<<endl;
cout<<"the difference for h_bias was was "<<abs(difference)<<endl;
hidden_layer_node.param->h_bias(perturb_dimension) -= delta_perturb;
getchar();
for (int word = 0;word<myParam.ngram_size-1;word++)
{
cout<<"the word is "<<word<<endl;
for (int num_perturb = 0;num_perturb<3;num_perturb++)
{
initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node);
perturb_dimension = rand() % embedding_dimension;
cout<<"the perturb dimension was "<<perturb_dimension<<endl;
//first perturb
int input_word = shuffled_training_data[word](minibatch_start_index+example_id);
//cout<<"the input word was "<<input_word<<endl;
//cout<<"before perturbation the dimension was "<< word_nodes[word].param->W(input_word,perturb_dimension)<<endl;
word_nodes[word].param->W(input_word,perturb_dimension) += delta_perturb;
//cout<<"after perturbation the dimension was "<< word_nodes[word].param->W(input_word,perturb_dimension)<<endl;
//then do fprop
fPropGradCheck(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node,shuffled_training_data);
//then compute NCE loss function
double perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,
hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples,
unigram_probs_vector,D_prime);
//cout<<"the perturbed nce loss was "<<perturbed_nce_loss<<endl;
finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb;
difference = finite_difference_gradient - gradient_input_W(input_word,perturb_dimension);
cout<<"the ratio is "<<gradient_input_W(input_word,perturb_dimension)/finite_difference_gradient<<endl;
cout<<"the difference for input W was was "<<abs(difference)<<endl;
//cout<<"the finite difference gradient of the perturb dimension "<<perturb_dimension<<" was "<<(perturbed_nce_loss-current_nce_loss)/delta_perturb<<endl;
//cout<<"the gradient was "<<gradient_input_W(input_word,perturb_dimension);
//unpurturbing
word_nodes[word].param->W(input_word,perturb_dimension)-= delta_perturb;
if (abs(difference) > 10E-6)
{
cout<<"the difference was greater than 10E-6 and the original paramter was "<<word_nodes[word].param->W(input_word,perturb_dimension)<<endl;
}
getchar();
}
//now perturbing the U matrices and checking gradients via finite differences
for (int num_perturb = 0;num_perturb<3;num_perturb++)
{
initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node);
int row_perturb_dimension = rand() % n_hidden;
cout<<"the row perturb dimension was "<<row_perturb_dimension<<endl;
int col_perturb_dimension = rand() % embedding_dimension;
cout<<"the col perturb dimension was "<<col_perturb_dimension<<endl;
//first perturb
//cout<<"before perturbation the dimension was "<< context_nodes[word].param->U(row_perturb_dimension,col_perturb_dimension)<<endl;
context_nodes[word].param->U(row_perturb_dimension,col_perturb_dimension) += delta_perturb;
//cout<<"after perturbation the dimension was "<< context_nodes[word].param->U(row_perturb_dimension,col_perturb_dimension)<<endl;
//then do fprop
fPropGradCheck(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node,shuffled_training_data);
//then compute NCE loss function
double perturbed_nce_loss = computeLossFunction(myParam,minibatch_start_index,current_minibatch_size,word_nodes,context_nodes,hidden_layer_node,
hidden_layer_to_output_node,shuffled_training_data,c_h,hidden_layer_to_output_node.fProp_matrix,minibatch_negative_samples,
unigram_probs_vector,D_prime);
finite_difference_gradient = (perturbed_nce_loss-current_nce_loss)/delta_perturb;
difference = finite_difference_gradient - gradients_context_matrix[word](row_perturb_dimension,col_perturb_dimension);
cout<<"the ratio is "<<gradients_context_matrix[word](row_perturb_dimension,col_perturb_dimension)/finite_difference_gradient<<endl;
cout<<"the difference for context matrix was "<<abs(difference)<<endl;
//cout<<"the perturbed nce loss was "<<perturbed_nce_loss<<endl;
//cout<<"the finite difference gradient of the perturb dimension "<<perturb_dimension<<" was "<<(perturbed_nce_loss-current_nce_loss)/delta_perturb<<endl;
//cout<<"the gradient was "<<gradients_context_matrix[word](row_perturb_dimension,col_perturb_dimension);
//unpurturbing
context_nodes[word].param->U(row_perturb_dimension,col_perturb_dimension) -= delta_perturb;
getchar();
}
}
}
initZero(myParam,word_nodes,context_nodes,hidden_layer_node,hidden_layer_to_output_node);
}
// Reset the forward/backward scratch matrices of every node in the
// computation graph to zero, so that activations and gradients from a
// previous pass cannot leak into the next fprop/bprop computation.
// Note: word_nodes[*].bProp_matrix is deliberately NOT cleared (matching
// the original implementation, where that call was commented out).
void initZero(param & myParam,vector<word_node > &word_nodes,vector<context_node > &context_nodes,hidden_node &hidden_layer_node,
    context_node & hidden_layer_to_output_node)
{
    const int context_width = myParam.ngram_size - 1;
    // Hidden layer and hidden-to-output node: clear both directions.
    hidden_layer_node.bProp_matrix.setZero();
    hidden_layer_node.fProp_matrix.setZero();
    hidden_layer_to_output_node.fProp_matrix.setZero();
    hidden_layer_to_output_node.bProp_matrix.setZero();
    // One (word node, context node) pair per position of the (n-1)-gram context.
    for (int pos = 0; pos < context_width; ++pos)
    {
        context_nodes[pos].fProp_matrix.setZero();
        context_nodes[pos].bProp_matrix.setZero();
        word_nodes[pos].fProp_matrix.setZero();
    }
}
//------------------------------------------------------------------------------
// computeLossFunction: total NCE (noise-contrastive estimation) loss over one
// minibatch, computed in parallel with OpenMP.
//
// For each example the contribution is
//     log P(D=1 | w, h)  +  sum over k noise samples of log P(D=0 | w', h)
// where P(D=1|w,h) = u(w,h)/Z_h / (u(w,h)/Z_h + k*Pn(w)),
//       u(w,h)     = exp(D'.W.row(w).dot(predicted_embedding) + D'.b(w)),
// and c_h stores log(1/Z_h) per context h.
//
// Parameters:
//   minibatch_start_index  - first row of this minibatch in the training data
//   current_minibatch_size - number of examples in this (possibly short) batch
//   shuffled_training_data - one column of word ids per ngram position
//   c_h                    - map: context -> log inverse normalization constant
//   minibatch_predicted_embeddings - one predicted embedding per column/example
//   minibatch_negative_samples     - (example, k) matrix of noise word ids
//   unigram_probs_vector   - per-thread copies of the noise distribution Pn
//   D_prime                - output word embeddings (weight rows W, biases b)
// word_nodes/context_nodes/hidden_* are not used here; they are kept so the
// signature matches the other graph routines.
//
// Returns: the summed NCE log-likelihood of the minibatch (higher is better).
//
// Fixes relative to the previous version:
//   * minibatch_loss was uninitialized: OpenMP's reduction combines the
//     thread-local sums INTO the variable's original value, so garbage was
//     added to the returned loss.
//   * the loop ran over myParam.minibatch_size instead of
//     current_minibatch_size, reading past the data on a short final batch.
//   * removed a discarded `std::setprecision(9);` (a stream manipulator not
//     applied to any stream is a no-op), an orphaned `#pragma omp barrier`
//     outside the parallel region (the region already ends with an implicit
//     barrier), and several unused locals.
double computeLossFunction(param & myParam,int minibatch_start_index,int current_minibatch_size,vector<word_node > &word_nodes,
    vector<context_node > &context_nodes,hidden_node &hidden_layer_node,context_node & hidden_layer_to_output_node,
    vector<Matrix<int,Dynamic,1> >&shuffled_training_data,vector_map & c_h,Matrix<double,Dynamic,Dynamic> & minibatch_predicted_embeddings,
    Matrix<int,Dynamic,Dynamic> & minibatch_negative_samples,vector<vector<double> > &unigram_probs_vector,Output_word_embeddings & D_prime)
{
    int ngram_size = myParam.ngram_size;
    int num_noise_samples = myParam.num_noise_samples;
    // Keep Eigen single-threaded inside the OpenMP region so the two
    // threading layers do not oversubscribe the machine.
    Eigen::initParallel();
    Eigen::setNbThreads(1);
    double minibatch_loss = 0.;  // BUG FIX: must start at zero for the reduction
    #pragma omp parallel firstprivate(minibatch_start_index,current_minibatch_size,ngram_size,num_noise_samples)
    {
        // BUG FIX: iterate over this batch's actual size, not the configured
        // maximum minibatch size.
        #pragma omp for reduction(+:minibatch_loss)
        for (int train_id = 0; train_id < current_minibatch_size; train_id++)
        {
            int thread_id = omp_get_thread_num();
            // The positive (observed) word is the last position of the ngram.
            int output_word = shuffled_training_data[ngram_size-1](minibatch_start_index+train_id);
            Matrix<double,Dynamic,1> predicted_embedding = minibatch_predicted_embeddings.col(train_id);
            // Rebuild the context key (first ngram_size-1 words) for the c_h lookup.
            vector<int> context;
            for (int word = 0; word < ngram_size-1; word++)
            {
                context.push_back(shuffled_training_data[word](minibatch_start_index+train_id));
            }
            // log(1/Z_h) for this context; a read of an existing key needs no
            // critical section.
            // NOTE(review): operator[] on a map default-inserts a missing key;
            // this assumes every context seen here was already inserted during
            // training -- confirm, otherwise first touch is a data race.
            double log_inv_normalization_const_h = c_h[context];
            double inv_normalization_const_h = exp(log_inv_normalization_const_h);
            // Unnormalized model score and probability of the positive example.
            double score = D_prime.W.row(output_word).dot(predicted_embedding) + D_prime.b(output_word);
            double unnorm_positive_prob = exp(score);
            // P(D=1 | output_word, h) under the data-vs-noise mixture with k noise draws.
            double positive_prob = (unnorm_positive_prob*inv_normalization_const_h/
                (unnorm_positive_prob*inv_normalization_const_h + num_noise_samples * unigram_probs_vector[thread_id][output_word]));
            double sample_loss = log(positive_prob);
            // Add log P(D=0 | sample, h) for each of the k noise samples.
            for (int sample_id = 0; sample_id < num_noise_samples; sample_id++)
            {
                int sample = minibatch_negative_samples(train_id,sample_id);
                assert (sample >= 0);
                double negative_score = D_prime.W.row(sample).dot(predicted_embedding) + D_prime.b(sample);
                double negative_unnorm_prob = exp(negative_score);
                double negative_prob = num_noise_samples*unigram_probs_vector[thread_id][sample]/
                    (negative_unnorm_prob*inv_normalization_const_h + num_noise_samples * unigram_probs_vector[thread_id][sample]);
                sample_loss += log(negative_prob);
            }
            minibatch_loss += sample_loss;
        }
    }   // implicit barrier at the end of the parallel region
    return(minibatch_loss);
}
// Run one full forward pass over the network for the gradient-check path:
// word embeddings -> per-position context transforms -> tanh hidden layer ->
// hidden-to-output transform. The results land in each node's fProp_matrix.
//
// Parameters:
//   minibatch_start_index / current_minibatch_size - slice of `data` to use
//   data - one column vector of word ids per ngram position
// The node arguments are the graph pieces whose fProp matrices are filled.
void inline fPropGradCheck(param & myParam,int minibatch_start_index,int current_minibatch_size,vector<word_node > &word_nodes,
    vector<context_node > &context_nodes,hidden_node &hidden_layer_node,context_node & hidden_layer_to_output_node,
    vector<Matrix<int,Dynamic,1> >&data)
{
    const int context_width = myParam.ngram_size - 1;
    // Stage 1: look up the input word embeddings for every context position.
    for (int pos = 0; pos < context_width; ++pos)
    {
        word_nodes[pos].param->fPropOmp(data[pos],word_nodes[pos].fProp_matrix,minibatch_start_index,current_minibatch_size);
    }
    // Let Eigen use the configured thread count for the dense products below.
    Eigen::setNbThreads(myParam.n_threads);
    // Stage 2: apply each position's context transform to its embeddings.
    for (int pos = 0; pos < context_width; ++pos)
    {
        context_nodes[pos].param->fProp(word_nodes[pos].fProp_matrix,context_nodes[pos].fProp_matrix);
    }
    // Stage 3: combine all context activations through the tanh hidden layer.
    hidden_layer_node.param->fPropTanh(context_nodes,hidden_layer_node.fProp_matrix);
    // Stage 4: map the hidden activations into the output embedding space.
    hidden_layer_to_output_node.param->fProp(hidden_layer_node.fProp_matrix,hidden_layer_to_output_node.fProp_matrix);
}
|
cancel.c | // RUN: %libomp-compile && env OMP_CANCELLATION=true %libomp-run | %sort-threads | FileCheck %s
// REQUIRES: ompt
#include "callback.h"
// Exercises the OMPT cancellation callback for both worksharing constructs:
// a cancelled "for" loop and two cancelled "sections". The FileCheck
// directive comments below are part of the test contract and are left
// untouched.
int main()
{
  // A single-threaded team keeps the emitted callback trace deterministic.
  #pragma omp parallel num_threads(1)
  {
    int x = 0;
    int i;
    // Cancellation inside a loop worksharing region.
    #pragma omp for
    for(i = 0; i < 2; i++)
    {
      #pragma omp cancel for
    }
    // Cancellation inside a sections worksharing region; with one thread,
    // both sections run on the master thread.
    #pragma omp sections
    {
      #pragma omp section
      {
        #pragma omp cancel sections
      }
      #pragma omp section
      {
        #pragma omp cancel sections
      }
    }
  }
  // Check if libomp supports the callbacks for this test.
  // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_cancel'
  // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_task_create: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter=[[NULL]], new_task_id=[[TASK_ID:[0-9]+]], parallel_function={{0x[0-f]*}}, task_type=ompt_task_initial=1, has_dependences=no
  // cancel for
  // ___CHECK: {{^}}[[MASTER_ID]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=20, codeptr_ra={{0x[0-f]*}}
  // cancel sections
  // ___CHECK: {{^}}[[MASTER_ID]]: ompt_event_cancel: task_data=[[TASK_ID:[0-9]+]], flags=18, codeptr_ra={{0x[0-f]*}}
  return 0;
}
|
mmul_omp.c | /*
This file is part of ParTI!.
ParTI! is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
ParTI! is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with ParTI!.
If not, see <http://www.gnu.org/licenses/>.
*/
#include <ParTI.h>
#include <stdlib.h>
#include "sptensor.h"
/* Tensor-times-matrix (TTM) product along `mode`: Y = X x_mode U, where X is
 * a sparse tensor, U a dense matrix, and Y the resulting semi-sparse tensor
 * (dense along `mode`). The accumulation loop over Y's fibers is parallelized
 * with OpenMP; each iteration writes a disjoint slice of Y->values, so no
 * synchronization is needed.
 * Returns 0 on success; errors are reported through the spt_Check* macros. */
int sptOmpSparseTensorMulMatrix(sptSemiSparseTensor *Y, sptSparseTensor * const X, sptMatrix * const U, sptIndex mode)
{
    int result;
    sptIndex *ind_buf;
    sptIndex m;
    sptNnzIndexVector fiberidx;
    /* The contraction mode must exist in X, and X's extent along that mode
     * must match the number of rows of U. */
    if(mode >= X->nmodes) {
        spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP SpTns * Mtx", "shape mismatch");
    }
    if(X->ndims[mode] != U->nrows) {
        spt_CheckError(SPTERR_SHAPE_MISMATCH, "OMP SpTns * Mtx", "shape mismatch");
    }
    /* Sort X so that nonzeros sharing a mode-`mode` fiber are contiguous;
     * the fiber index built below relies on this ordering. */
    sptSparseTensorSortIndexAtMode(X, mode, 0);
    // jli: try to avoid malloc in all operation functions.
    ind_buf = malloc(X->nmodes * sizeof *ind_buf);
    spt_CheckOSError(!ind_buf, "OMP SpTns * Mtx");
    /* Y has X's shape except along `mode`, where it takes U's column count. */
    for(m = 0; m < X->nmodes; ++m) {
        ind_buf[m] = X->ndims[m];
    }
    ind_buf[mode] = U->ncols;
    // jli: use pre-processing to allocate Y size outside this function.
    result = sptNewSemiSparseTensor(Y, X->nmodes, mode, ind_buf);
    free(ind_buf);
    spt_CheckError(result, "OMP SpTns * Mtx", NULL);
    /* fiberidx.data[i]..fiberidx.data[i+1] delimits the nonzeros of X that
     * belong to Y's i-th fiber. */
    sptSemiSparseTensorSetIndices(Y, &fiberidx, X);
    sptTimer timer;
    sptNewTimer(&timer, 0);
    sptStartTimer(timer);
    /* One parallel iteration per output fiber; Y->values rows are assumed to
     * start zeroed by sptNewSemiSparseTensor (TODO confirm). */
    #pragma omp parallel for
    for(sptNnzIndex i = 0; i < Y->nnz; ++i) {
        sptNnzIndex inz_begin = fiberidx.data[i];
        sptNnzIndex inz_end = fiberidx.data[i+1];
        // jli: exchange two loops
        for(sptNnzIndex j = inz_begin; j < inz_end; ++j) {
            /* r is the coordinate of nonzero j along the contracted mode;
             * add its value times row r of U into the dense output fiber. */
            sptIndex r = X->inds[mode].data[j];
            for(sptIndex k = 0; k < U->ncols; ++k) {
                Y->values.values[i*Y->stride + k] += X->values.data[j] * U->values[r*U->stride + k];
            }
        }
    }
    sptStopTimer(timer);
    sptPrintElapsedTime(timer, "OMP SpTns * Mtx");
    sptFreeTimer(timer);
    sptFreeNnzIndexVector(&fiberidx);
    return 0;
}
|
GB_apply_op.c | //------------------------------------------------------------------------------
// GB_apply_op: typecast and apply a unary or binary operator to an array
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Cx = op (A)
// Cx and A->x may be aliased.
// This function is CSR/CSC agnostic. For positional ops, A is treated as if
// it is in CSC format. The caller has already modified the op if A is in CSR
// format.
// Template/GB_positional_op_ijp can return GrB_OUT_OF_MEMORY.
// Otherwise, this function only returns GrB_SUCCESS.
#include "GB_apply.h"
#include "GB_binop.h"
#include "GB_ek_slice.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_unop__include.h"
#include "GB_binop__include.h"
#endif
#define GB_FREE_ALL \
{ \
GB_WERK_POP (A_ek_slicing, int64_t) ; \
}
GrB_Info GB_apply_op // apply a unary operator, Cx = op (A)
(
GB_void *Cx, // output array, of type op->ztype
const GrB_UnaryOp op1, // unary operator to apply
const GrB_BinaryOp op2, // binary operator to apply
const GxB_Scalar scalar, // scalar to bind to binary operator
bool binop_bind1st, // if true, binop(x,Ax) else binop(Ax,y)
const GrB_Matrix A, // input matrix
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (Cx != NULL) ;
ASSERT (op1 != NULL || op2 != NULL) ;
ASSERT_MATRIX_OK (A, "A input for GB_apply_op", GB0) ;
ASSERT (GB_JUMBLED_OK (A)) ; // A can be jumbled
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
//--------------------------------------------------------------------------
// get A
//--------------------------------------------------------------------------
// A->x is not const since the operator might be applied in-place, if
// C is aliased to C.
GB_void *Ax = (GB_void *) A->x ; // A->x has type A->type
const int8_t *Ab = A->b ; // only if A is bitmap
const GrB_Type Atype = A->type ; // type of A->x
const int64_t anz = GB_NNZ_HELD (A) ; // size of A->x and Cx
//--------------------------------------------------------------------------
// determine the maximum number of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// apply the operator
//--------------------------------------------------------------------------
GB_Opcode opcode = (op1 != NULL) ? op1->opcode : op2->opcode ;
if (GB_OPCODE_IS_POSITIONAL (opcode))
{
//----------------------------------------------------------------------
// built-in positional unary or binary operator
//----------------------------------------------------------------------
bool is64 ;
if (op1 != NULL)
{
ASSERT_UNARYOP_OK (op1, "positional op1 for GB_apply_op", GB0) ;
is64 = (op1->ztype == GrB_INT64) ;
}
else // if (op2 != NULL)
{
ASSERT_BINARYOP_OK (op2, "positional op2 for GB_apply_op", GB0) ;
is64 = (op2->ztype == GrB_INT64) ;
}
// get A and C
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ai = A->i ;
int64_t anvec = A->nvec ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
//----------------------------------------------------------------------
// determine number of threads to use
//----------------------------------------------------------------------
int nthreads = GB_nthreads (anz + anvec, chunk, nthreads_max) ;
int ntasks = (nthreads == 1) ? 1 : (32 * nthreads) ;
//----------------------------------------------------------------------
// Cx = positional_op (A)
//----------------------------------------------------------------------
int64_t offset = GB_positional_offset (opcode) ;
// GB_positional_op_ijp allocates a set of tasks, which can possibly
// fail if out of memory.
if (is64)
{
int64_t *restrict Cx_int = (int64_t *) Cx ;
switch (opcode)
{
case GB_POSITIONI_opcode : // z = position_i(A(i,j)) == i
case GB_POSITIONI1_opcode : // z = position_i1(A(i,j)) == i+1
case GB_FIRSTI_opcode : // z = first_i(A(i,j),y) == i
case GB_FIRSTI1_opcode : // z = first_i1(A(i,j),y) == i+1
case GB_SECONDI_opcode : // z = second_i(x,A(i,j)) == i
case GB_SECONDI1_opcode : // z = second_i1(x,A(i,j)) == i+1
#define GB_POSITION i + offset
#include "GB_positional_op_ip.c"
return (GrB_SUCCESS) ;
case GB_POSITIONJ_opcode : // z = position_j(A(i,j)) == j
case GB_POSITIONJ1_opcode : // z = position_j1(A(i,j)) == j+1
case GB_FIRSTJ_opcode : // z = first_j(A(i,j),y) == j
case GB_FIRSTJ1_opcode : // z = first_j1(A(i,j),y) == j+1
case GB_SECONDJ_opcode : // z = second_j(x,A(i,j)) == j
case GB_SECONDJ1_opcode : // z = second_j1(x,A(i,j)) == j+1
#define GB_POSITION j + offset
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
default: ;
}
}
else
{
int32_t *restrict Cx_int = (int32_t *) Cx ;
switch (opcode)
{
case GB_POSITIONI_opcode : // z = position_i(A(i,j)) == i
case GB_POSITIONI1_opcode : // z = position_i1(A(i,j)) == i+1
case GB_FIRSTI_opcode : // z = first_i(A(i,j),y) == i
case GB_FIRSTI1_opcode : // z = first_i1(A(i,j),y) == i+1
case GB_SECONDI_opcode : // z = second_i(x,A(i,j)) == i
case GB_SECONDI1_opcode : // z = second_i1(x,A(i,j)) == i+1
#define GB_POSITION (int32_t) (i + offset)
#include "GB_positional_op_ip.c"
return (GrB_SUCCESS) ;
case GB_POSITIONJ_opcode : // z = position_j(A(i,j)) == j
case GB_POSITIONJ1_opcode : // z = position_j1(A(i,j)) == j+1
case GB_FIRSTJ_opcode : // z = first_j(A(i,j),y) == j
case GB_FIRSTJ1_opcode : // z = first_j1(A(i,j),y) == j+1
case GB_SECONDJ_opcode : // z = second_j(x,A(i,j)) == j
case GB_SECONDJ1_opcode : // z = second_j1(x,A(i,j)) == j+1
#define GB_POSITION (int32_t) (j + offset)
#include "GB_positional_op_ijp.c"
return (GrB_SUCCESS) ;
default: ;
}
}
}
else if (op1 != NULL)
{
//----------------------------------------------------------------------
// unary operator
//----------------------------------------------------------------------
ASSERT_UNARYOP_OK (op1, "op1 for GB_apply_op", GB0) ;
// determine number of threads to use
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
GrB_UnaryOp op = op1 ;
#ifndef GBCOMPACT
if ((Atype == op->xtype)
|| (opcode == GB_IDENTITY_opcode) || (opcode == GB_ONE_opcode))
{
// The switch factory is used if the op is IDENTITY or ONE, or if
// no typecasting is being done. The ONE operator ignores the type
// of its input and just produces a 1 of op->ztype == op->xtype.
// The IDENTITY operator can do arbitrary typecasting.
//------------------------------------------------------------------
// define the worker for the switch factory
//------------------------------------------------------------------
#define GB_unop_apply(op,zname,aname) \
GB (_unop_apply_ ## op ## zname ## aname)
#define GB_WORKER(op,zname,ztype,aname,atype) \
{ \
if (GB_unop_apply (op,zname,aname) ((ztype *) Cx, \
(const atype *) Ax, Ab, anz, nthreads) \
== GrB_SUCCESS) return (GrB_SUCCESS) ; \
} \
break ;
//------------------------------------------------------------------
// launch the switch factory
//------------------------------------------------------------------
#include "GB_unop_factory.c"
}
#endif
//----------------------------------------------------------------------
// generic worker: typecast and apply a unary operator
//----------------------------------------------------------------------
GB_BURBLE_N (anz, "(generic apply: %s) ", op->name) ;
size_t asize = Atype->size ;
size_t zsize = op->ztype->size ;
size_t xsize = op->xtype->size ;
GB_cast_function
cast_A_to_X = GB_cast_factory (op->xtype->code, Atype->code) ;
GxB_unary_function fop = op->function ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// xwork = (xtype) Ax [p]
GB_void xwork [GB_VLA(xsize)] ;
cast_A_to_X (xwork, Ax +(p*asize), asize) ;
// Cx [p] = fop (xwork)
fop (Cx +(p*zsize), xwork) ;
}
}
else
{
//----------------------------------------------------------------------
// binary operator
//----------------------------------------------------------------------
ASSERT_BINARYOP_OK (op2, "standard op2 for GB_apply_op", GB0) ;
ASSERT_SCALAR_OK (scalar, "scalar for GB_apply_op", GB0) ;
// determine number of threads to use
int nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
GB_Type_code xcode, ycode, zcode ;
bool op_is_first = (opcode == GB_FIRST_opcode) ;
bool op_is_second = (opcode == GB_SECOND_opcode) ;
bool op_is_pair = (opcode == GB_PAIR_opcode) ;
size_t asize = Atype->size ;
size_t ssize = scalar->type->size ;
size_t zsize = op2->ztype->size ;
size_t xsize = op2->xtype->size ;
size_t ysize = op2->ytype->size ;
GB_Type_code scode = scalar->type->code ;
xcode = op2->xtype->code ;
ycode = op2->ytype->code ;
// typecast the scalar to the operator input
bool ignore_scalar = false ;
size_t ssize_cast ;
GB_Type_code scode_cast ;
if (binop_bind1st)
{
ssize_cast = xsize ;
scode_cast = xcode ;
ignore_scalar = op_is_second || op_is_pair ;
}
else
{
ssize_cast = ysize ;
scode_cast = ycode ;
ignore_scalar = op_is_first || op_is_pair ;
}
GB_void swork [GB_VLA(ssize_cast)] ;
GB_void *scalarx = (GB_void *) scalar->x ;
if (scode_cast != scode && !ignore_scalar)
{
// typecast the scalar to the operator input, in swork
GB_cast_function cast_s = GB_cast_factory (scode_cast, scode) ;
cast_s (swork, scalar->x, ssize) ;
scalarx = swork ;
}
#ifndef GBCOMPACT
if (binop_bind1st)
{
//--------------------------------------------------------------
// z = op(scalar,Ax)
//--------------------------------------------------------------
if (GB_binop_builtin (
op2->xtype, ignore_scalar,
Atype, op_is_first || op_is_pair,
op2, false, &opcode, &xcode, &ycode, &zcode))
{
//----------------------------------------------------------
// define the worker for the switch factory
//----------------------------------------------------------
#define GB_bind1st(op,xname) GB (_bind1st_ ## op ## xname)
#define GB_BINOP_WORKER(op,xname) \
{ \
if (GB_bind1st (op, xname) (Cx, scalarx, Ax, Ab, anz,\
nthreads) == GrB_SUCCESS) return (GrB_SUCCESS) ; \
} \
break ;
//----------------------------------------------------------
// launch the switch factory
//----------------------------------------------------------
#define GB_NO_SECOND
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
}
else
{
//--------------------------------------------------------------
// z = op(Ax,scalar)
//--------------------------------------------------------------
if (GB_binop_builtin (
Atype, op_is_second || op_is_pair,
op2->ytype, ignore_scalar,
op2, false, &opcode, &xcode, &ycode, &zcode))
{
//----------------------------------------------------------
// define the worker for the switch factory
//----------------------------------------------------------
#define GB_bind2nd(op,xname) GB (_bind2nd_ ## op ## xname)
#undef GB_BINOP_WORKER
#define GB_BINOP_WORKER(op,xname) \
{ \
if (GB_bind2nd (op, xname) (Cx, Ax, scalarx, Ab, anz,\
nthreads) == GrB_SUCCESS) return (GrB_SUCCESS) ; \
} \
break ;
//----------------------------------------------------------
// launch the switch factory
//----------------------------------------------------------
#define GB_NO_FIRST
#define GB_NO_PAIR
#include "GB_binop_factory.c"
}
}
#endif
//----------------------------------------------------------------------
// generic worker: typecast and apply a binary operator
//----------------------------------------------------------------------
GB_BURBLE_N (anz, "(generic apply: %s) ", op2->name) ;
GB_Type_code acode = Atype->code ;
GxB_binary_function fop = op2->function ;
if (binop_bind1st)
{
// Cx = op (scalar,Ax)
GB_cast_function cast_A_to_Y = GB_cast_factory (ycode, acode) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// ywork = (ytype) Ax [p]
GB_void ywork [GB_VLA(ysize)] ;
cast_A_to_Y (ywork, Ax +(p*asize), asize) ;
// Cx [p] = fop (xwork, ywork)
fop (Cx +(p*zsize), scalarx, ywork) ;
}
}
else
{
// Cx = op (Ax,scalar)
GB_cast_function cast_A_to_X = GB_cast_factory (xcode, acode) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
// xwork = (xtype) Ax [p]
GB_void xwork [GB_VLA(xsize)] ;
cast_A_to_X (xwork, Ax +(p*asize), asize) ;
// Cx [p] = fop (xwork, ywork)
fop (Cx +(p*zsize), xwork, scalarx) ;
}
}
}
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
}
/* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Shelby Lockhart @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2019, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP DEV implementation
* of the NVECTOR module.
* -----------------------------------------------------------------*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmpdev.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private functions for special cases of vector operations */
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z); /* z=x */
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */
static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z); /* z=-x */
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */
static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */
static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x); /* x <- ax */
/* Private functions for special cases of vector array operations */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
N_Vector_ID N_VGetVectorID_OpenMPDEV(N_Vector v)
{
  /* Identify this implementation; the input vector itself is not inspected. */
  return SUNDIALS_NVEC_OPENMPDEV;
}
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
N_Vector N_VNewEmpty_OpenMPDEV(sunindextype length)
{
  /* Allocates an N_Vector with a full ops table and an OpenMPDEV content
     struct, but with NO data buffers attached (host_data/dev_data are NULL
     and own_data is SUNFALSE).  Returns NULL on any allocation failure,
     freeing whatever was already allocated. */
  N_Vector v;
  N_Vector_Ops ops;
  N_VectorContent_OpenMPDEV content;
  /* Create vector */
  v = NULL;
  v = (N_Vector) malloc(sizeof *v);
  if (v == NULL) return(NULL);
  /* Create vector operation structure */
  ops = NULL;
  ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops));
  if (ops == NULL) { free(v); return(NULL); }
  /* constructors, destructors, and utility operations */
  ops->nvgetvectorid = N_VGetVectorID_OpenMPDEV;
  ops->nvclone = N_VClone_OpenMPDEV;
  ops->nvcloneempty = N_VCloneEmpty_OpenMPDEV;
  ops->nvdestroy = N_VDestroy_OpenMPDEV;
  ops->nvspace = N_VSpace_OpenMPDEV;
  /* raw host-array access is intentionally disabled: data lives on the
     device and callers must use the dedicated host/device accessors */
  ops->nvgetarraypointer = NULL;
  ops->nvsetarraypointer = NULL;
  /* standard vector operations */
  ops->nvlinearsum = N_VLinearSum_OpenMPDEV;
  ops->nvconst = N_VConst_OpenMPDEV;
  ops->nvprod = N_VProd_OpenMPDEV;
  ops->nvdiv = N_VDiv_OpenMPDEV;
  ops->nvscale = N_VScale_OpenMPDEV;
  ops->nvabs = N_VAbs_OpenMPDEV;
  ops->nvinv = N_VInv_OpenMPDEV;
  ops->nvaddconst = N_VAddConst_OpenMPDEV;
  ops->nvdotprod = N_VDotProd_OpenMPDEV;
  ops->nvmaxnorm = N_VMaxNorm_OpenMPDEV;
  ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMPDEV;
  ops->nvwrmsnorm = N_VWrmsNorm_OpenMPDEV;
  ops->nvmin = N_VMin_OpenMPDEV;
  ops->nvwl2norm = N_VWL2Norm_OpenMPDEV;
  ops->nvl1norm = N_VL1Norm_OpenMPDEV;
  ops->nvcompare = N_VCompare_OpenMPDEV;
  ops->nvinvtest = N_VInvTest_OpenMPDEV;
  ops->nvconstrmask = N_VConstrMask_OpenMPDEV;
  ops->nvminquotient = N_VMinQuotient_OpenMPDEV;
  /* fused vector operations (optional, NULL means disabled by default) */
  ops->nvlinearcombination = NULL;
  ops->nvscaleaddmulti = NULL;
  ops->nvdotprodmulti = NULL;
  /* vector array operations (optional, NULL means disabled by default) */
  ops->nvlinearsumvectorarray = NULL;
  ops->nvscalevectorarray = NULL;
  ops->nvconstvectorarray = NULL;
  ops->nvwrmsnormvectorarray = NULL;
  ops->nvwrmsnormmaskvectorarray = NULL;
  ops->nvscaleaddmultivectorarray = NULL;
  ops->nvlinearcombinationvectorarray = NULL;
  /* Create content */
  content = NULL;
  content = (N_VectorContent_OpenMPDEV) malloc(sizeof(struct _N_VectorContent_OpenMPDEV));
  if (content == NULL) { free(ops); free(v); return(NULL); }
  content->length = length;
  content->own_data = SUNFALSE;   /* empty vector never owns data */
  content->host_data = NULL;
  content->dev_data = NULL;
  /* Attach content and ops */
  v->content = content;
  v->ops = ops;
  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
N_Vector N_VNew_OpenMPDEV(sunindextype length)
{
  /* Create a new vector of the given length, allocating mirrored data
     buffers on both the host and the default OpenMP device.  Returns NULL
     if the vector shell or either buffer cannot be allocated; on failure
     all partial allocations are released.  A length of zero yields an
     empty vector with no buffers attached. */
  N_Vector v;
  realtype *data;
  realtype *dev_data;
  int dev;

  v = N_VNewEmpty_OpenMPDEV(length);
  if (v == NULL) return(NULL);

  /* Create data */
  if (length > 0) {

    /* Allocate memory on host; bail out before touching the device */
    data = (realtype *) malloc(length * sizeof(realtype));
    if (data == NULL) { N_VDestroy_OpenMPDEV(v); return(NULL); }

    /* Allocate memory on device */
    dev = omp_get_default_device();
    dev_data = (realtype *) omp_target_alloc(length * sizeof(realtype), dev);
    if (dev_data == NULL) {
      /* own_data is still SUNFALSE here, so free the host buffer ourselves */
      free(data);
      N_VDestroy_OpenMPDEV(v);
      return(NULL);
    }

    /* Attach data; vector now owns both buffers */
    NV_OWN_DATA_OMPDEV(v)  = SUNTRUE;
    NV_DATA_HOST_OMPDEV(v) = data;
    NV_DATA_DEV_OMPDEV(v)  = dev_data;
  }

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
N_Vector N_VMake_OpenMPDEV(sunindextype length, realtype *h_vdata, realtype *d_vdata)
{
  /* Wrap user-supplied host and device arrays in a new vector.  The vector
     does NOT take ownership of the arrays (own_data stays SUNFALSE), so the
     caller remains responsible for freeing them.  Returns NULL if either
     array pointer is NULL or the vector shell cannot be allocated.
     (The previous revision fetched the device/host ids here but never used
     them; those dead locals have been removed.) */
  N_Vector v;

  if (h_vdata == NULL || d_vdata == NULL) return(NULL);

  v = N_VNewEmpty_OpenMPDEV(length);
  if (v == NULL) return(NULL);

  if (length > 0) {
    /* Attach data, borrowed from the caller */
    NV_OWN_DATA_OMPDEV(v)  = SUNFALSE;
    NV_DATA_HOST_OMPDEV(v) = h_vdata;
    NV_DATA_DEV_OMPDEV(v)  = d_vdata;
  }

  return(v);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
N_Vector *N_VCloneVectorArray_OpenMPDEV(int count, N_Vector w)
{
  /* Create an array of `count` full clones of w.  Returns NULL if count is
     non-positive, the array cannot be allocated, or any clone fails; on a
     clone failure all previously created clones and the array itself are
     destroyed.  (Fixed: the failure path used to pass j-1 to
     N_VDestroyVectorArray, leaking the clone at index j-1.) */
  N_Vector *vs;
  int j;

  if (count <= 0) return(NULL);

  vs = (N_Vector *) malloc(count * sizeof(N_Vector));
  if (vs == NULL) return(NULL);

  for (j = 0; j < count; j++) {
    vs[j] = N_VClone_OpenMPDEV(w);
    if (vs[j] == NULL) {
      /* destroy the j clones created so far (indices 0 .. j-1) */
      N_VDestroyVectorArray_OpenMPDEV(vs, j);
      return(NULL);
    }
  }

  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
N_Vector *N_VCloneVectorArrayEmpty_OpenMPDEV(int count, N_Vector w)
{
  /* Create an array of `count` empty clones of w (ops copied, no data
     buffers).  Returns NULL if count is non-positive, the array cannot be
     allocated, or any clone fails; on a clone failure all previously
     created clones and the array itself are destroyed.  (Fixed: the
     failure path used to pass j-1 to N_VDestroyVectorArray, leaking the
     clone at index j-1.) */
  N_Vector *vs;
  int j;

  if (count <= 0) return(NULL);

  vs = (N_Vector *) malloc(count * sizeof(N_Vector));
  if (vs == NULL) return(NULL);

  for (j = 0; j < count; j++) {
    vs[j] = N_VCloneEmpty_OpenMPDEV(w);
    if (vs[j] == NULL) {
      /* destroy the j clones created so far (indices 0 .. j-1) */
      N_VDestroyVectorArray_OpenMPDEV(vs, j);
      return(NULL);
    }
  }

  return(vs);
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMPDEV
*/
void N_VDestroyVectorArray_OpenMPDEV(N_Vector *vs, int count)
{
  /* Destroy the first `count` vectors in vs, then free the array itself.
     A NULL array is accepted as a no-op (robustness guard added; the
     dead `vs = NULL` store on a by-value parameter was removed). */
  int j;

  if (vs == NULL) return;

  for (j = 0; j < count; j++) N_VDestroy_OpenMPDEV(vs[j]);

  free(vs);
  return;
}
/* ----------------------------------------------------------------------------
* Function to return number of vector elements
*/
sunindextype N_VGetLength_OpenMPDEV(N_Vector v)
{
  /* Return the number of elements stored in the vector's content struct. */
  return NV_LENGTH_OMPDEV(v);
}
/* ----------------------------------------------------------------------------
* Function to return a pointer to the data array on the host.
*/
realtype *N_VGetHostArrayPointer_OpenMPDEV(N_Vector v)
{
  /* Return the raw host-side data buffer (may be NULL for an empty vector).
     Note: may be stale relative to the device buffer until
     N_VCopyFromDevice_OpenMPDEV is called. */
  return((realtype *) NV_DATA_HOST_OMPDEV(v));
}
/* ----------------------------------------------------------------------------
* Function to return a pointer to the data array on the device.
*/
realtype *N_VGetDeviceArrayPointer_OpenMPDEV(N_Vector v)
{
  /* Return the raw device-side data buffer (may be NULL for an empty
     vector).  The pointer is only dereferenceable on the device. */
  return((realtype *) NV_DATA_DEV_OMPDEV(v));
}
/* ----------------------------------------------------------------------------
* Function to print a vector to stdout
*/
void N_VPrint_OpenMPDEV(N_Vector x)
{
  /* Convenience wrapper: print the vector's host data to stdout. */
  N_VPrintFile_OpenMPDEV(x, stdout);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to outfile
*/
void N_VPrintFile_OpenMPDEV(N_Vector x, FILE *outfile)
{
  /* Print the HOST copy of the data, one element per line, followed by a
     blank line.  The device buffer is not read here; callers who want
     current device values must call N_VCopyFromDevice_OpenMPDEV first.
     The format string is selected by the configured realtype precision. */
  sunindextype i, N;
  realtype *xd;
  xd = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd = NV_DATA_HOST_OMPDEV(x);
  for (i = 0; i < N; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
    STAN_SUNDIALS_FPRINTF(outfile, "%11.8Lg\n", xd[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
    STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]);
#else
    /* single precision */
    STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]);
#endif
  }
  STAN_SUNDIALS_FPRINTF(outfile, "\n");
  return;
}
/* ----------------------------------------------------------------------------
* Function to copy host array into device array
*/
void N_VCopyToDevice_OpenMPDEV(N_Vector x)
{
  /* Synchronize the device buffer with the host buffer: copy the whole
     host array of x onto the default OpenMP device. */
  sunindextype buflen = NV_LENGTH_OMPDEV(x);
  realtype *src       = NV_DATA_HOST_OMPDEV(x);
  realtype *dst       = NV_DATA_DEV_OMPDEV(x);
  int dst_dev         = omp_get_default_device();
  int src_dev         = omp_get_initial_device();

  /* omp_target_memcpy(dst, src, bytes, dst_off, src_off, dst_dev, src_dev) */
  omp_target_memcpy(dst, src, sizeof(realtype) * buflen, 0, 0, dst_dev, src_dev);

  return;
}
/* ----------------------------------------------------------------------------
* Function to copy device array into host array
*/
void N_VCopyFromDevice_OpenMPDEV(N_Vector x)
{
  /* Synchronize the host buffer with the device buffer: copy the whole
     device array of x back to the host. */
  sunindextype buflen = NV_LENGTH_OMPDEV(x);
  realtype *dst       = NV_DATA_HOST_OMPDEV(x);
  realtype *src       = NV_DATA_DEV_OMPDEV(x);
  int src_dev         = omp_get_default_device();
  int dst_dev         = omp_get_initial_device();

  /* omp_target_memcpy(dst, src, bytes, dst_off, src_off, dst_dev, src_dev) */
  omp_target_memcpy(dst, src, sizeof(realtype) * buflen, 0, 0, dst_dev, src_dev);

  return;
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
N_Vector N_VCloneEmpty_OpenMPDEV(N_Vector w)
{
  /* Create a new vector with the same length and the same ops table as w,
     but with NO data buffers attached.  Each ops field is copied
     individually from w so that any optional (fused / vector-array)
     operations the caller enabled on w are inherited by the clone.
     Returns NULL if w is NULL or any allocation fails. */
  N_Vector v;
  N_Vector_Ops ops;
  N_VectorContent_OpenMPDEV content;
  if (w == NULL) return(NULL);
  /* Create vector */
  v = NULL;
  v = (N_Vector) malloc(sizeof *v);
  if (v == NULL) return(NULL);
  /* Create vector operation structure */
  ops = NULL;
  ops = (N_Vector_Ops) malloc(sizeof(struct _generic_N_Vector_Ops));
  if (ops == NULL) { free(v); return(NULL); }
  /* constructors, destructors, and utility operations */
  ops->nvgetvectorid = w->ops->nvgetvectorid;
  ops->nvclone = w->ops->nvclone;
  ops->nvcloneempty = w->ops->nvcloneempty;
  ops->nvdestroy = w->ops->nvdestroy;
  ops->nvspace = w->ops->nvspace;
  ops->nvgetarraypointer = w->ops->nvgetarraypointer;
  ops->nvsetarraypointer = w->ops->nvsetarraypointer;
  /* standard vector operations */
  ops->nvlinearsum = w->ops->nvlinearsum;
  ops->nvconst = w->ops->nvconst;
  ops->nvprod = w->ops->nvprod;
  ops->nvdiv = w->ops->nvdiv;
  ops->nvscale = w->ops->nvscale;
  ops->nvabs = w->ops->nvabs;
  ops->nvinv = w->ops->nvinv;
  ops->nvaddconst = w->ops->nvaddconst;
  ops->nvdotprod = w->ops->nvdotprod;
  ops->nvmaxnorm = w->ops->nvmaxnorm;
  ops->nvwrmsnormmask = w->ops->nvwrmsnormmask;
  ops->nvwrmsnorm = w->ops->nvwrmsnorm;
  ops->nvmin = w->ops->nvmin;
  ops->nvwl2norm = w->ops->nvwl2norm;
  ops->nvl1norm = w->ops->nvl1norm;
  ops->nvcompare = w->ops->nvcompare;
  ops->nvinvtest = w->ops->nvinvtest;
  ops->nvconstrmask = w->ops->nvconstrmask;
  ops->nvminquotient = w->ops->nvminquotient;
  /* fused vector operations */
  ops->nvlinearcombination = w->ops->nvlinearcombination;
  ops->nvscaleaddmulti = w->ops->nvscaleaddmulti;
  ops->nvdotprodmulti = w->ops->nvdotprodmulti;
  /* vector array operations */
  ops->nvlinearsumvectorarray = w->ops->nvlinearsumvectorarray;
  ops->nvscalevectorarray = w->ops->nvscalevectorarray;
  ops->nvconstvectorarray = w->ops->nvconstvectorarray;
  ops->nvwrmsnormvectorarray = w->ops->nvwrmsnormvectorarray;
  ops->nvwrmsnormmaskvectorarray = w->ops->nvwrmsnormmaskvectorarray;
  ops->nvscaleaddmultivectorarray = w->ops->nvscaleaddmultivectorarray;
  ops->nvlinearcombinationvectorarray = w->ops->nvlinearcombinationvectorarray;
  /* Create content */
  content = NULL;
  content = (N_VectorContent_OpenMPDEV) malloc(sizeof(struct _N_VectorContent_OpenMPDEV));
  if (content == NULL) { free(ops); free(v); return(NULL); }
  content->length = NV_LENGTH_OMPDEV(w);
  content->own_data = SUNFALSE;   /* empty clone never owns data */
  content->host_data = NULL;
  content->dev_data = NULL;
  /* Attach content and ops */
  v->content = content;
  v->ops = ops;
  return(v);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
N_Vector N_VClone_OpenMPDEV(N_Vector w)
{
  /* Create a full clone of w: same ops and length, with freshly allocated
     (uninitialized) host and device buffers owned by the clone.  Returns
     NULL on any allocation failure, releasing partial allocations.
     (Fixed: the device allocation from omp_target_alloc was never
     checked, and the device buffer leaked if the host malloc failed.) */
  N_Vector v;
  realtype *data;
  realtype *dev_data;
  sunindextype length;
  int dev;

  v = N_VCloneEmpty_OpenMPDEV(w);
  if (v == NULL) return(NULL);

  length = NV_LENGTH_OMPDEV(w);

  /* Create data */
  if (length > 0) {

    /* Allocate memory on host; bail out before touching the device */
    data = (realtype *) malloc(length * sizeof(realtype));
    if (data == NULL) { N_VDestroy_OpenMPDEV(v); return(NULL); }

    /* Allocate memory on device */
    dev = omp_get_default_device();
    dev_data = (realtype *) omp_target_alloc(length * sizeof(realtype), dev);
    if (dev_data == NULL) {
      /* own_data is still SUNFALSE here, so free the host buffer ourselves */
      free(data);
      N_VDestroy_OpenMPDEV(v);
      return(NULL);
    }

    /* Attach data; clone now owns both buffers */
    NV_OWN_DATA_OMPDEV(v)  = SUNTRUE;
    NV_DATA_HOST_OMPDEV(v) = data;
    NV_DATA_DEV_OMPDEV(v)  = dev_data;
  }

  return(v);
}
/* ----------------------------------------------------------------------------
* Destroy vector and free vector memory
*/
void N_VDestroy_OpenMPDEV(N_Vector v)
{
  /* Free the vector, its ops table, its content struct, and -- if the
     vector owns them -- its host and device data buffers.  A NULL vector
     is accepted as a no-op (guard added; previously NULL was dereferenced
     through the content macro). */
  int dev;

  if (v == NULL) return;

  if (NV_OWN_DATA_OMPDEV(v) == SUNTRUE) {
    /* Free host memory */
    free(NV_DATA_HOST_OMPDEV(v));
    NV_DATA_HOST_OMPDEV(v) = NULL;
    /* Free device memory */
    dev = omp_get_default_device();
    omp_target_free(NV_DATA_DEV_OMPDEV(v), dev);
    NV_DATA_DEV_OMPDEV(v) = NULL;
  }

  free(v->content); v->content = NULL;
  free(v->ops);     v->ops = NULL;
  free(v);

  return;
}
/* ----------------------------------------------------------------------------
* Get storage requirement for N_Vector
*/
void N_VSpace_OpenMPDEV(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
  /* Report storage: lrw = number of realtype words (the vector length),
     liw = number of integer words (just the stored length field). */
  *lrw = NV_LENGTH_OMPDEV(v);
  *liw = 1;
  return;
}
/* ----------------------------------------------------------------------------
* Compute linear combination z[i] = a*x[i]+b*y[i]
*/
void N_VLinearSum_OpenMPDEV(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
  /* Compute z[i] = a*x[i] + b*y[i] on the device.  The long ladder below
     dispatches the common coefficient/aliasing patterns to specialized
     kernels (axpy, sum, diff, scaled variants); only the fully general
     case falls through to the loop at the bottom.  The order of the tests
     matters: aliasing cases must be caught before the coefficient cases. */
  sunindextype i, N;
  realtype c, *xd_dev, *yd_dev, *zd_dev;
  N_Vector v1, v2;
  booleantype test;
  int dev;
  xd_dev = yd_dev = zd_dev = NULL;
  if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */
    Vaxpy_OpenMPDEV(a,x,y);
    return;
  }
  if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */
    Vaxpy_OpenMPDEV(b,y,x);
    return;
  }
  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE)) {
    VSum_OpenMPDEV(x, y, z);
    return;
  }
  /* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
  /* `test` remembers which operand carries the -1 so v2 - v1 is correct */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    v1 = test ? y : x;
    v2 = test ? x : y;
    VDiff_OpenMPDEV(v2, v1, z);
    return;
  }
  /* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin1_OpenMPDEV(c, v1, v2, z);   /* z = c*v1 + v2 */
    return;
  }
  /* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c = test ? b : a;
    v1 = test ? y : x;
    v2 = test ? x : y;
    VLin2_OpenMPDEV(c, v1, v2, z);   /* z = c*v1 - v2 */
    return;
  }
  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b) {
    VScaleSum_OpenMPDEV(a, x, y, z);
    return;
  }
  /* Case: a == -b */
  if (a == -b) {
    VScaleDiff_OpenMPDEV(a, x, y, z);
    return;
  }
  /* Do all cases not handled above:
     (1) a == other, b == 0.0 - user should have called N_VScale
     (2) a == 0.0, b == other - user should have called N_VScale
     (3) a,b == other, a !=b, a != -b */
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  zd_dev = NV_DATA_DEV_OMPDEV(z);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* general kernel: offloaded elementwise loop over device pointers */
#pragma omp target map(to:N,a,b) is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < N; i++)
    zd_dev[i] = (a*xd_dev[i])+(b*yd_dev[i]);
  return;
}
/* ----------------------------------------------------------------------------
* Assigns constant value to all vector elements, z[i] = c
*/
void N_VConst_OpenMPDEV(realtype c, N_Vector z)
{
  /* Fill every element of z with the constant c, computed on the device. */
  sunindextype i;
  sunindextype veclen = NV_LENGTH_OMPDEV(z);
  realtype *zdev      = NV_DATA_DEV_OMPDEV(z);
  int devid           = omp_get_default_device();

#pragma omp target map(to:veclen,c) is_device_ptr(zdev) device(devid)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < veclen; i++) {
    zdev[i] = c;
  }

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
void N_VProd_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  /* Elementwise product on the device: z[i] = x[i] * y[i]. */
  sunindextype i;
  sunindextype veclen = NV_LENGTH_OMPDEV(x);
  realtype *xdev      = NV_DATA_DEV_OMPDEV(x);
  realtype *ydev      = NV_DATA_DEV_OMPDEV(y);
  realtype *zdev      = NV_DATA_DEV_OMPDEV(z);
  int devid           = omp_get_default_device();

#pragma omp target map(to:veclen) is_device_ptr(xdev, ydev, zdev) device(devid)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < veclen; i++) {
    zdev[i] = xdev[i]*ydev[i];
  }

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
void N_VDiv_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  /* Elementwise quotient on the device: z[i] = x[i] / y[i].
     No zero check is performed on y, matching the N_Vector contract. */
  sunindextype i;
  sunindextype veclen = NV_LENGTH_OMPDEV(x);
  realtype *xdev      = NV_DATA_DEV_OMPDEV(x);
  realtype *ydev      = NV_DATA_DEV_OMPDEV(y);
  realtype *zdev      = NV_DATA_DEV_OMPDEV(z);
  int devid           = omp_get_default_device();

#pragma omp target map(to:veclen) is_device_ptr(xdev, ydev, zdev) device(devid)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < veclen; i++) {
    zdev[i] = xdev[i]/ydev[i];
  }

  return;
}
/* ----------------------------------------------------------------------------
* Compute scaler multiplication z[i] = c*x[i]
*/
void N_VScale_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  /* Compute z[i] = c * x[i] on the device.  Special cases are dispatched
     to cheaper kernels: in-place scaling (z == x), plain copy (c == 1),
     and negation (c == -1); the general loop handles everything else. */
  sunindextype i, N;
  realtype *xd_dev, *zd_dev;
  int dev;
  xd_dev = zd_dev = NULL;
  if (z == x) { /* BLAS usage: scale x <- cx */
    VScaleBy_OpenMPDEV(c, x);
    return;
  }
  if (c == ONE) {
    VCopy_OpenMPDEV(x, z);          /* z = x */
  } else if (c == -ONE) {
    VNeg_OpenMPDEV(x, z);           /* z = -x */
  } else {
    /* general case: offloaded elementwise scaling */
    N = NV_LENGTH_OMPDEV(x);
    xd_dev = NV_DATA_DEV_OMPDEV(x);
    zd_dev = NV_DATA_DEV_OMPDEV(z);
    /* get default device identifier */
    dev = omp_get_default_device();
#pragma omp target map(to:N,c) is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (i = 0; i < N; i++)
      zd_dev[i] = c*xd_dev[i];
  }
  return;
}
/* ----------------------------------------------------------------------------
* Compute absolute value of vector components z[i] = SUNRabs(x[i])
*/
void N_VAbs_OpenMPDEV(N_Vector x, N_Vector z)
{
  /* Elementwise absolute value on the device: z[i] = |x[i]|. */
  sunindextype i;
  sunindextype veclen = NV_LENGTH_OMPDEV(x);
  realtype *xdev      = NV_DATA_DEV_OMPDEV(x);
  realtype *zdev      = NV_DATA_DEV_OMPDEV(z);
  int devid           = omp_get_default_device();

#pragma omp target map(to:veclen) is_device_ptr(xdev, zdev) device(devid)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < veclen; i++) {
    zdev[i] = SUNRabs(xdev[i]);
  }

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = 1 / x[i]
*/
void N_VInv_OpenMPDEV(N_Vector x, N_Vector z)
{
  /* Elementwise reciprocal on the device: z[i] = 1 / x[i].
     No zero check is performed; see N_VInvTest for the checked variant. */
  sunindextype i;
  sunindextype veclen = NV_LENGTH_OMPDEV(x);
  realtype *xdev      = NV_DATA_DEV_OMPDEV(x);
  realtype *zdev      = NV_DATA_DEV_OMPDEV(z);
  int devid           = omp_get_default_device();

#pragma omp target map(to:veclen) is_device_ptr(xdev, zdev) device(devid)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < veclen; i++) {
    zdev[i] = ONE/xdev[i];
  }

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise addition of a scaler to a vector z[i] = x[i] + b
*/
void N_VAddConst_OpenMPDEV(N_Vector x, realtype b, N_Vector z)
{
  /* Elementwise shift on the device: z[i] = x[i] + b. */
  sunindextype i;
  sunindextype veclen = NV_LENGTH_OMPDEV(x);
  realtype *xdev      = NV_DATA_DEV_OMPDEV(x);
  realtype *zdev      = NV_DATA_DEV_OMPDEV(z);
  int devid           = omp_get_default_device();

#pragma omp target map(to:veclen,b) is_device_ptr(xdev, zdev) device(devid)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < veclen; i++) {
    zdev[i] = xdev[i]+b;
  }

  return;
}
/* ----------------------------------------------------------------------------
* Computes the dot product of two vectors, a = sum(x[i]*y[i])
*/
realtype N_VDotProd_OpenMPDEV(N_Vector x, N_Vector y)
{
  /* Return the dot product sum(x[i]*y[i]) computed on the device via an
     offloaded reduction; the scalar `sum` is mapped tofrom so the reduced
     value is copied back to the host. */
  sunindextype i, N;
  realtype sum, *xd_dev, *yd_dev;
  int dev;
  xd_dev = yd_dev = NULL;
  sum = ZERO;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  yd_dev = NV_DATA_DEV_OMPDEV(y);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += xd_dev[i]*yd_dev[i];
  }
  return(sum);
}
/* ----------------------------------------------------------------------------
* Computes max norm of a vector
*/
realtype N_VMaxNorm_OpenMPDEV(N_Vector x)
{
  /* Return the max norm max(|x[i]|) computed on the device via an
     offloaded max-reduction seeded with ZERO (so an empty vector yields 0). */
  sunindextype i, N;
  realtype max, *xd_dev;
  int dev;
  max = ZERO;
  xd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:max) is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(max:max) schedule(static, 1)
  for (i = 0; i < N; i++) {
    max = SUNMAX(SUNRabs(xd_dev[i]), max);
  }
  return(max);
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a vector
*/
realtype N_VWrmsNorm_OpenMPDEV(N_Vector x, N_Vector w)
{
  /* Return the weighted root-mean-square norm
     sqrt( sum( (x[i]*w[i])^2 ) / N ), with the sum reduced on the device
     and the final sqrt taken on the host. */
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev;
  int dev;
  sum = ZERO;
  xd_dev = wd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    sum += SUNSQR(xd_dev[i]*wd_dev[i]);
  }
  return(SUNRsqrt(sum/N));
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a masked vector
*/
realtype N_VWrmsNormMask_OpenMPDEV(N_Vector x, N_Vector w, N_Vector id)
{
  /* Return the masked weighted RMS norm: like N_VWrmsNorm, but only
     components with id[i] > 0 contribute to the sum.  Note the divisor is
     still the full length N, not the number of unmasked entries, per the
     N_Vector specification. */
  sunindextype i, N;
  realtype sum, *xd_dev, *wd_dev, *idd_dev;
  int dev;
  sum = ZERO;
  xd_dev = wd_dev = idd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  wd_dev = NV_DATA_DEV_OMPDEV(w);
  idd_dev = NV_DATA_DEV_OMPDEV(id);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(tofrom:sum) is_device_ptr(xd_dev, wd_dev, idd_dev) device(dev)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < N; i++) {
    if (idd_dev[i] > ZERO) {
      sum += SUNSQR(xd_dev[i]*wd_dev[i]);
    }
  }
  return(SUNRsqrt(sum / N));
}
/* ----------------------------------------------------------------------------
* Finds the minimun component of a vector
*/
realtype N_VMin_OpenMPDEV(N_Vector x)
{
  /* Return the minimum component of x.  A single team first seeds `min`
     with x[0] on the device (this is why num_teams(1) is used -- the seed
     and the reduction must share the team region), then reduces over the
     remaining elements.  NOTE(review): assumes N >= 1; x[0] is read
     unconditionally. */
  sunindextype i, N;
  realtype min, *xd_dev;
  int dev;
  xd_dev = NULL;
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);
  /* get default device identifier */
  dev = omp_get_default_device();
#pragma omp target map(to:N) map(from:min) is_device_ptr(xd_dev) device(dev)
#pragma omp teams num_teams(1)
  {
    min = xd_dev[0];
#pragma omp distribute parallel for reduction(min:min) schedule(static, 1)
    for (i = 1; i < N; i++) {
      min = SUNMIN(xd_dev[i], min);
    }
  }
  return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted L2 norm of a vector
*/
realtype N_VWL2Norm_OpenMPDEV(N_Vector x, N_Vector w)
{
  /* Weighted Euclidean norm: sqrt( sum( (x[i]*w[i])^2 ) ), with the sum
     reduced on the device and the sqrt taken on the host. */
  sunindextype i;
  sunindextype veclen = NV_LENGTH_OMPDEV(x);
  realtype *xdev      = NV_DATA_DEV_OMPDEV(x);
  realtype *wdev      = NV_DATA_DEV_OMPDEV(w);
  realtype sum        = ZERO;
  int devid           = omp_get_default_device();

#pragma omp target map(to:veclen) map(tofrom:sum) is_device_ptr(xdev, wdev) device(devid)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < veclen; i++) {
    sum += SUNSQR(xdev[i]*wdev[i]);
  }

  return(SUNRsqrt(sum));
}
/* ----------------------------------------------------------------------------
* Computes L1 norm of a vector
*/
realtype N_VL1Norm_OpenMPDEV(N_Vector x)
{
  /* L1 norm: sum( |x[i]| ), reduced on the device. */
  sunindextype i;
  sunindextype veclen = NV_LENGTH_OMPDEV(x);
  realtype *xdev      = NV_DATA_DEV_OMPDEV(x);
  realtype sum        = ZERO;
  int devid           = omp_get_default_device();

#pragma omp target map(to:veclen) map(tofrom:sum) is_device_ptr(xdev) device(devid)
#pragma omp teams distribute parallel for reduction(+:sum) schedule(static, 1)
  for (i = 0; i < veclen; i++) {
    sum += SUNRabs(xdev[i]);
  }

  return(sum);
}
/* ----------------------------------------------------------------------------
* Compare vector component values to a scaler
*/
void N_VCompare_OpenMPDEV(realtype c, N_Vector x, N_Vector z)
{
  /* Elementwise threshold test on the device:
     z[i] = 1 if |x[i]| >= c, otherwise 0. */
  sunindextype i;
  sunindextype veclen = NV_LENGTH_OMPDEV(x);
  realtype *xdev      = NV_DATA_DEV_OMPDEV(x);
  realtype *zdev      = NV_DATA_DEV_OMPDEV(z);
  int devid           = omp_get_default_device();

#pragma omp target map(to:veclen,c) is_device_ptr(xdev, zdev) device(devid)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (i = 0; i < veclen; i++) {
    zdev[i] = (SUNRabs(xdev[i]) >= c) ? ONE : ZERO;
  }

  return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = ONE/x[i] and checks if x[i] == ZERO
*/
/* Computes z[i] = 1/x[i] for all entries with x[i] != 0.  Returns SUNFALSE
 * if any x[i] == 0 (the corresponding z[i] is left unset), SUNTRUE otherwise. */
booleantype N_VInvTest_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype k, len;
  realtype *xdata, *zdata, fail;
  int dev_id;

  len   = NV_LENGTH_OMPDEV(x);
  xdata = NV_DATA_DEV_OMPDEV(x);
  zdata = NV_DATA_DEV_OMPDEV(z);

  /* offload to the default device */
  dev_id = omp_get_default_device();

  /* fail becomes ONE (via the max reduction) if any zero entry is seen */
  fail = ZERO;

#pragma omp target map(to:len) map(tofrom:fail) is_device_ptr(xdata, zdata) device(dev_id)
#pragma omp teams distribute parallel for reduction(max:fail) schedule(static, 1)
  for (k = 0; k < len; k++) {
    if (xdata[k] == ZERO)
      fail = ONE;
    else
      zdata[k] = ONE/xdata[k];
  }

  return (fail > ZERO) ? SUNFALSE : SUNTRUE;
}
/* ----------------------------------------------------------------------------
* Compute constraint mask of a vector
*/
/* Constraint test of x against constraint vector c, writing the violation
 * mask m (m[i] = ONE where the constraint is violated, ZERO otherwise).
 * |c[i]| > 1.5 requests a strict sign constraint, |c[i]| > 0.5 a non-strict
 * one, c[i] == 0 no constraint.  Returns SUNTRUE iff no entry violates its
 * constraint. */
booleantype N_VConstrMask_OpenMPDEV(N_Vector c, N_Vector x, N_Vector m)
{
  sunindextype k, len;
  realtype flag;
  realtype *cdata, *xdata, *mdata;
  int dev_id;

  len   = NV_LENGTH_OMPDEV(x);
  xdata = NV_DATA_DEV_OMPDEV(x);
  cdata = NV_DATA_DEV_OMPDEV(c);
  mdata = NV_DATA_DEV_OMPDEV(m);

  /* offload to the default device */
  dev_id = omp_get_default_device();

  /* flag drops to ZERO (via the min reduction) on any violation */
  flag = ONE;

#pragma omp target map(to:len) map(tofrom:flag) is_device_ptr(xdata, cdata, mdata) device(dev_id)
#pragma omp teams distribute parallel for reduction(min:flag) schedule(static, 1)
  for (k = 0; k < len; k++) {
    mdata[k] = ZERO;
    if (cdata[k] != ZERO) {
      if (cdata[k] > ONEPT5 || cdata[k] < -ONEPT5) {
        /* strict inequality required: x[k]*c[k] must be positive */
        if (xdata[k]*cdata[k] <= ZERO) { flag = ZERO; mdata[k] = ONE; }
      }
      else if (cdata[k] > HALF || cdata[k] < -HALF) {
        /* non-strict inequality required: x[k]*c[k] must be non-negative */
        if (xdata[k]*cdata[k] < ZERO) { flag = ZERO; mdata[k] = ONE; }
      }
    }
  }

  return (flag == ONE) ? SUNTRUE : SUNFALSE;
}
/* ----------------------------------------------------------------------------
* Compute minimum componentwise quotient
*/
/* Minimum componentwise quotient num[i]/denom[i] over all entries with
 * denom[i] != 0; returns BIG_REAL when every denominator is zero. */
realtype N_VMinQuotient_OpenMPDEV(N_Vector num, N_Vector denom)
{
  sunindextype k, len;
  realtype *ndata, *ddata, minq;
  int dev_id;

  len   = NV_LENGTH_OMPDEV(num);
  ndata = NV_DATA_DEV_OMPDEV(num);
  ddata = NV_DATA_DEV_OMPDEV(denom);

  /* offload the reduction to the default device */
  dev_id = omp_get_default_device();

  minq = BIG_REAL;

#pragma omp target map(to:len) map(tofrom:minq) is_device_ptr(ndata, ddata) device(dev_id)
#pragma omp teams distribute parallel for reduction(min:minq) schedule(static, 1)
  for (k = 0; k < len; k++) {
    if (ddata[k] != ZERO)
      minq = SUNMIN(ndata[k]/ddata[k], minq);
  }

  return(minq);
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
/*
 * Computes the linear combination z = sum_{i=0}^{nvec-1} c[i] * X[i] on the
 * device.  The cases where z aliases X[0] (with or without c[0] == 1) are
 * handled in place.  Returns 0 on success, -1 if nvec < 1.
 *
 * Fixes relative to the previous revision:
 *  - the two target regions in the X[0] == z branch were missing the
 *    device(dev) clause present on every other target region in this file;
 *    added for consistency (dev is the default device, so the selected
 *    target is unchanged);
 *  - the scratch scalar to_add is now private(to_add) in each inner
 *    parallel for: without it the scalar is shared by the threads of a
 *    team, racing between its write and the atomic read.
 */
int N_VLinearCombination_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
  int i, dev;
  realtype to_add; /* per-thread scratch for the atomically-added term */
  sunindextype j, N;
  realtype* zd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], z);
    return(0);
  }

  /* should have called N_VLinearSum */
  if (nvec == 2) {
    N_VLinearSum_OpenMPDEV(c[0], X[0], c[1], X[1], z);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(z);
  zd_dev = NV_DATA_DEV_OMPDEV(z);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store X dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);

  /*
   * X[0] += c[i]*X[i], i = 1,...,nvec-1
   */
  if ((X[0] == z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=1; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1) private(to_add)
        for (j=0; j<N; j++) {
          to_add = c[i] * xd_dev[j];
#pragma omp atomic
          zd_dev[j] += to_add;
        }
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /*
   * X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1
   */
  if (X[0] == z) {
    /* first scale the output vector in place by c[0] */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev) device(dev)
    {
#pragma omp teams distribute parallel for schedule(static,1)
      for (j=0; j<N; j++)
        zd_dev[j] *= c[0];
    }

    /* then accumulate the remaining scaled vectors */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=1; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1) private(to_add)
        for (j=0; j<N; j++) {
          to_add = c[i] * xd_dev[j];
#pragma omp atomic
          zd_dev[j] += to_add;
        }
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /*
   * z = sum{ c[i] * X[i] }, i = 0,...,nvec-1
   */

  /* scale the first vector into the output vector */
  xd_dev = NV_DATA_DEV_OMPDEV(X[0]);
#pragma omp target map(to:N,c[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
  {
#pragma omp teams distribute parallel for schedule(static, 1)
    for (j=0; j<N; j++) {
      zd_dev[j] = c[0] * xd_dev[j];
    }
  }

  /* accumulate the remaining scaled vectors into the output vector */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=1; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1) private(to_add)
      for (j=0; j<N; j++) {
        to_add = c[i] * xd_dev[j];
#pragma omp atomic
        zd_dev[j] += to_add;
      }
    }
  }

  free(xd_dev_ptrs);
  return(0);
}
/*
 * Computes Z[i] = a[i]*x + Y[i] for i = 0,...,nvec-1 on the device.  When
 * Y and Z are the same array the update is performed in place
 * (Y[i] += a[i]*x).  Returns 0 on success, -1 if nvec < 1.
 */
int N_VScaleAddMulti_OpenMPDEV(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMPDEV(a[0], x, ONE, Y[0], Z[0]);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  /*
   * Y[i][j] += a[i] * x[j]   (in-place case: Y == Z)
   */
  if (Y == Z) {
    /* one distribute iteration per vector; each team threads the inner
       element loop.  NOTE(review): yd_dev is reassigned inside the teams
       region -- assumes each team gets its own copy; confirm against the
       data-sharing rules of the supported offload compilers. */
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          yd_dev[j] += a[i] * xd_dev[j];
      }
    }
    free(yd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]   (out-of-place case)
   */
#pragma omp target map(to:N,nvec,a[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = a[i] * xd_dev[j] + yd_dev[j];
    }
  }

  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/*
 * Computes the dot products dotprods[i] = <x, Y[i]> for i = 0,...,nvec-1
 * on the device.  Returns 0 on success, -1 if nvec < 1.
 */
int N_VDotProdMulti_OpenMPDEV(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype** yd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VDotProd */
  if (nvec == 1) {
    dotprods[0] = N_VDotProd_OpenMPDEV(x, Y[0]);
    return(0);
  }

  /* get vector length and data array */
  N = NV_LENGTH_OMPDEV(x);
  xd_dev = NV_DATA_DEV_OMPDEV(x);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize dot products (mapped tofrom and accumulated on device) */
  for (i=0; i<nvec; i++) {
    dotprods[i] = ZERO;
  }

  /* Allocate and store dev pointers to copy to device */
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);

  /* compute multiple dot products: one distribute iteration per vector,
     threaded sum reduction within each team.
     NOTE(review): `sum` and `yd_dev` are plain scalars written by every
     distribute iteration -- assumes teams do not share these variables;
     confirm against the OpenMP data-sharing rules of the target compilers. */
#pragma omp target map(to:N,nvec,yd_dev_ptrs[:nvec]) map(tofrom:dotprods[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
  for (i=0; i<nvec; i++) {
    yd_dev = yd_dev_ptrs[i];
    sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
    for (j=0; j<N; j++)
      sum += xd_dev[j] * yd_dev[j];
    dotprods[i] += sum;
  }

  free(yd_dev_ptrs);
  return(0);
}
/*
* -----------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------
*/
/*
 * Computes Z[i] = a*X[i] + b*Y[i] for i = 0,...,nvec-1 on the device.
 * Common coefficient combinations (a or b equal to +/-1, a == b, a == -b)
 * are dispatched to specialized helpers; the general case is handled by a
 * single fused target region.  Returns 0 on success, -1 if nvec < 1.
 */
int N_VLinearSumVectorArray_OpenMPDEV(int nvec,
                                      realtype a, N_Vector* X,
                                      realtype b, N_Vector* Y,
                                      N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  N_Vector* V1;
  N_Vector* V2;
  booleantype test;
  realtype c;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VLinearSum */
  if (nvec == 1) {
    N_VLinearSum_OpenMPDEV(a, X[0], b, Y[0], Z[0]);
    return(0);
  }

  /* BLAS usage: axpy y <- ax+y */
  if ((b == ONE) && (Z == Y))
    return(VaxpyVectorArray_OpenMPDEV(nvec, a, X, Y));

  /* BLAS usage: axpy x <- by+x */
  if ((a == ONE) && (Z == X))
    return(VaxpyVectorArray_OpenMPDEV(nvec, b, Y, X));

  /* Case: a == b == 1.0 */
  if ((a == ONE) && (b == ONE))
    return(VSumVectorArray_OpenMPDEV(nvec, X, Y, Z));

  /* Cases: */
  /* (1) a == 1.0, b = -1.0, */
  /* (2) a == -1.0, b == 1.0 */
  if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VDiffVectorArray_OpenMPDEV(nvec, V2, V1, Z));
  }

  /* Cases: */
  /* (1) a == 1.0, b == other or 0.0, */
  /* (2) a == other or 0.0, b == 1.0 */
  /* if a or b is 0.0, then user should have called N_VScale */
  if ((test = (a == ONE)) || (b == ONE)) {
    c = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin1VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }

  /* Cases: */
  /* (1) a == -1.0, b != 1.0, */
  /* (2) a != 1.0, b == -1.0 */
  if ((test = (a == -ONE)) || (b == -ONE)) {
    c = test ? b : a;
    V1 = test ? Y : X;
    V2 = test ? X : Y;
    return(VLin2VectorArray_OpenMPDEV(nvec, c, V1, V2, Z));
  }

  /* Case: a == b */
  /* catches case both a and b are 0.0 - user should have called N_VConst */
  if (a == b)
    return(VScaleSumVectorArray_OpenMPDEV(nvec, a, X, Y, Z));

  /* Case: a == -b */
  if (a == -b)
    return(VScaleDiffVectorArray_OpenMPDEV(nvec, a, X, Y, Z));

  /* Do all cases not handled above: */
  /* (1) a == other, b == 0.0 - user should have called N_VScale */
  /* (2) a == 0.0, b == other - user should have called N_VScale */
  /* (3) a,b == other, a !=b, a != -b */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* compute linear sum for each vector pair in vector arrays: one
     distribute iteration per vector, threaded inner element loop */
#pragma omp target map(to:N,nvec,a,b,xd_dev_ptrs[:nvec], yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = a * xd_dev[j] + b * yd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/*
 * Scales each vector: Z[i] = c[i] * X[i] for i = 0,...,nvec-1 on the
 * device; performed in place when X == Z.  Returns 0 on success, -1 if
 * nvec < 1.
 */
int N_VScaleVectorArray_OpenMPDEV(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VScale */
  if (nvec == 1) {
    N_VScale_OpenMPDEV(c[0], X[0], Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  }

  /*
   * X[i] *= c[i]   (in-place case)
   */
  if (X == Z) {
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          xd_dev[j] *= c[i];
      }
    }
    free(xd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /*
   * Z[i] = c[i] * X[i]   (out-of-place case)
   */
#pragma omp target map(to:N,nvec,c[:nvec],xd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c[i] * xd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/*
 * Sets every element of each vector in the array Z to the constant c on
 * the device.  Returns 0 on success, -1 if nvec < 1.
 */
int N_VConstVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* zd_dev=NULL;
  realtype** zd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VConst */
  if (nvec == 1) {
    N_VConst_OpenMPDEV(c, Z[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get device */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* set each vector in the vector array to a constant: one distribute
     iteration per vector, threaded inner element loop */
#pragma omp target map(to:N,nvec,zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c;
    }
  }

  free(zd_dev_ptrs);
  return(0);
}
/*
 * Computes the weighted root-mean-square norm of each vector in the array:
 * nrm[i] = sqrt( (1/N) * sum_j (X[i][j]*W[i][j])^2 ), for i = 0,...,nvec-1.
 * Returns 0 on success, -1 if nvec < 1.
 *
 * Fix: the inner reduction loop was wrapped in a compound statement
 * ("#pragma omp parallel for" followed by "{ ... }"); a worksharing-loop
 * construct must be immediately followed by the for statement, so the
 * braces were a compile error and have been removed.
 */
int N_VWrmsNormVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W, realtype* nrm)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* wd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** wd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNorm_OpenMPDEV(X[0], W[0]);
    return(0);
  }

  /* get vector length */
  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize norms (mapped tofrom and written on device) */
  for (i=0; i<nvec; i++)
    nrm[i] = ZERO;

  /* Allocate and store dev pointers to copy to device */
  wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]);
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);

  /* compute the WRMS norm for each vector in the vector array: one
     distribute iteration per vector, threaded sum reduction per team */
#pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \
  is_device_ptr(xd_dev, wd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      wd_dev = wd_dev_ptrs[i];
      sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
      for (j=0; j<N; j++)
        sum += SUNSQR(xd_dev[j] * wd_dev[j]);
      nrm[i] = SUNRsqrt(sum/N);
    }
  }

  free(wd_dev_ptrs);
  free(xd_dev_ptrs);
  return(0);
}
/*
 * Computes the masked weighted root-mean-square norm of each vector in the
 * array: nrm[i] = sqrt( (1/N) * sum_{j : id[j] > 0} (X[i][j]*W[i][j])^2 ).
 * Entries where the mask vector id is not positive are excluded from the
 * sum.  Returns 0 on success, -1 if nvec < 1.
 *
 * Fix: the inner reduction loop was wrapped in a compound statement
 * ("#pragma omp parallel for" followed by "{ ... }"); a worksharing-loop
 * construct must be immediately followed by the for statement, so the
 * braces were a compile error and have been removed.
 */
int N_VWrmsNormMaskVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* W,
                                         N_Vector id, realtype* nrm)
{
  int i, dev;
  sunindextype j, N;
  realtype sum;
  realtype* wd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype* idd_dev=NULL;
  realtype** wd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);

  /* should have called N_VWrmsNorm */
  if (nvec == 1) {
    nrm[0] = N_VWrmsNormMask_OpenMPDEV(X[0], W[0], id);
    return(0);
  }

  /* get vector length and mask data array */
  N = NV_LENGTH_OMPDEV(X[0]);
  idd_dev = NV_DATA_DEV_OMPDEV(id);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* initialize norms (mapped tofrom and written on device) */
  for (i=0; i<nvec; i++)
    nrm[i] = ZERO;

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  wd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    wd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(W[i]);

  /* compute the masked WRMS norm for each vector in the vector array */
#pragma omp target map(to:N,nvec,xd_dev_ptrs[:nvec],wd_dev_ptrs[:nvec]) map(tofrom:nrm[:nvec]) \
  is_device_ptr(idd_dev,xd_dev,wd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      wd_dev = wd_dev_ptrs[i];
      sum = ZERO;
#pragma omp parallel for reduction(+:sum) schedule(static, 1)
      for (j=0; j<N; j++) {
        if (idd_dev[j] > ZERO)
          sum += SUNSQR(xd_dev[j] * wd_dev[j]);
      }
      nrm[i] = SUNRsqrt(sum/N);
    }
  }

  free(xd_dev_ptrs);
  free(wd_dev_ptrs);
  return(0);
}
/*
 * For each of the nvec vectors X[i] and each of the nsum scale factors
 * a[j], computes Z[j][i] = a[j]*X[i] + Y[j][i] on the device; performed in
 * place (Y[j][i] += a[j]*X[i]) when Y == Z.  Small cases are dispatched to
 * N_VLinearSum / N_VScaleAddMulti / N_VLinearSumVectorArray.  Returns 0 on
 * success (or the dispatched routine's return value), -1 if nvec < 1 or
 * nsum < 1.
 */
int N_VScaleAddMultiVectorArray_OpenMPDEV(int nvec, int nsum, realtype* a,
                                          N_Vector* X, N_Vector** Y, N_Vector** Z)
{
  int i, j, dev;
  sunindextype k, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;
  int retval;
  N_Vector* YY;
  N_Vector* ZZ;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */

  if (nvec == 1) {

    /* should have called N_VLinearSum */
    if (nsum == 1) {
      N_VLinearSum_OpenMPDEV(a[0], X[0], ONE, Y[0][0], Z[0][0]);
      return(0);
    }

    /* should have called N_VScaleAddMulti */
    YY = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    ZZ = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    for (j=0; j<nsum; j++) {
      YY[j] = Y[j][0];
      ZZ[j] = Z[j][0];
    }
    retval = N_VScaleAddMulti_OpenMPDEV(nsum, a, X[0], YY, ZZ);
    free(YY);
    free(ZZ);
    return(retval);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 1) {
    retval = N_VLinearSumVectorArray_OpenMPDEV(nvec, a[0], X, ONE, Y[0], Z[0]);
    return(retval);
  }

  /* ----------------------------
   * Compute multiple linear sums
   * ---------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device.  The Y (and Z)
     pointer tables are flattened row-major: entry [i*nsum + j] holds the
     device data of Y[j][i]. */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      yd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Y[j][i]);
  }

  /*
   * Y[i][j] += a[i] * x[j]   (in-place case: Y == Z)
   */
  if (Y == Z) {
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum]) \
  is_device_ptr(xd_dev, yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
        for (j=0; j<nsum; j++) {
          yd_dev = yd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            yd_dev[k] += a[j] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }

  /* Allocate and store dev pointers to copy to device */
  zd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (i=0; i<nvec; i++) {
    for (j=0; j<nsum; j++)
      zd_dev_ptrs[i * nsum + j] = NV_DATA_DEV_OMPDEV(Z[j][i]);
  }

  /*
   * Z[i][j] = Y[i][j] + a[i] * x[j]   (out-of-place case)
   */
#pragma omp target map(to:N,nvec,nsum,a[:nsum],xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec*nsum]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      for (j=0; j<nsum; j++) {
        yd_dev = yd_dev_ptrs[i*nsum+j];
        zd_dev = zd_dev_ptrs[i*nsum+j];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] = a[j] * xd_dev[k] + yd_dev[k];
      }
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/*
 * Computes, for each vector index j in [0,nvec),
 *   Z[j] = sum_{i=0}^{nsum-1} c[i] * X[i][j]
 * on the device.  The aliased cases where Z is X[0] (with or without
 * c[0] == 1) are handled in place.  Small cases are dispatched to
 * N_VScale / N_VLinearSum / N_VLinearCombination / vector-array variants.
 * Returns 0 on success, -1 if nvec < 1 or nsum < 1.
 */
int N_VLinearCombinationVectorArray_OpenMPDEV(int nvec, int nsum,
                                              realtype* c,
                                              N_Vector** X,
                                              N_Vector* Z)
{
  int i; /* vector arrays index in summation [0,nsum) */
  int j; /* vector index in vector array [0,nvec) */
  sunindextype k; /* element index in vector [0,N) */
  sunindextype N;
  realtype* zd_dev=NULL;
  realtype* xd_dev=NULL;
  realtype** zd_dev_ptrs=NULL;
  realtype** xd_dev_ptrs=NULL;
  int dev;
  realtype* ctmp;
  N_Vector* Y;

  /* invalid number of vectors */
  if (nvec < 1) return(-1);
  if (nsum < 1) return(-1);

  /* ---------------------------
   * Special cases for nvec == 1
   * --------------------------- */

  if (nvec == 1) {

    /* should have called N_VScale */
    if (nsum == 1) {
      N_VScale_OpenMPDEV(c[0], X[0][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearSum */
    if (nsum == 2) {
      N_VLinearSum_OpenMPDEV(c[0], X[0][0], c[1], X[1][0], Z[0]);
      return(0);
    }

    /* should have called N_VLinearCombination */
    Y = (N_Vector *) malloc(nsum * sizeof(N_Vector));
    for (i=0; i<nsum; i++) {
      Y[i] = X[i][0];
    }
    N_VLinearCombination_OpenMPDEV(nsum, c, Y, Z[0]);
    free(Y);
    return(0);
  }

  /* --------------------------
   * Special cases for nvec > 1
   * -------------------------- */

  /* should have called N_VScaleVectorArray */
  if (nsum == 1) {
    ctmp = (realtype*) malloc(nvec * sizeof(realtype));
    for (j=0; j<nvec; j++) {
      ctmp[j] = c[0];
    }
    N_VScaleVectorArray_OpenMPDEV(nvec, ctmp, X[0], Z);
    free(ctmp);
    return(0);
  }

  /* should have called N_VLinearSumVectorArray */
  if (nsum == 2) {
    N_VLinearSumVectorArray_OpenMPDEV(nvec, c[0], X[0], c[1], X[1], Z);
    return(0);
  }

  /* --------------------------
   * Compute linear combination
   * -------------------------- */

  /* get vector length */
  N = NV_LENGTH_OMPDEV(Z[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device.  The X pointer
     table is flattened row-major: entry [j*nsum + i] holds the device
     data of X[i][j]. */
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  xd_dev_ptrs = (realtype**) malloc(nvec * nsum * sizeof(realtype*));
  for (j=0; j<nvec; j++)
    zd_dev_ptrs[j] = NV_DATA_DEV_OMPDEV(Z[j]);
  for (j=0; j<nvec; j++) {
    for (i=0; i<nsum; i++)
      xd_dev_ptrs[j * nsum + i] = NV_DATA_DEV_OMPDEV(X[i][j]);
  }

  /*
   * X[0][j] += c[i]*X[i][j], i = 1,...,nvec-1   (in place, c[0] == 1)
   */
  if ((X[0] == Z) && (c[0] == ONE)) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (j=0; j<nvec; j++) {
        zd_dev = zd_dev_ptrs[j];
        for (i=1; i<nsum; i++) {
          xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            zd_dev[k] += c[i] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }

  /*
   * X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nvec-1
   * (in place, general c[0])
   */
  if (X[0] == Z) {
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
    {
      for (j=0; j<nvec; j++) {
        zd_dev = zd_dev_ptrs[j];
        /* scale the output vector by c[0] first */
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] *= c[0];
        /* then accumulate the remaining scaled vectors */
        for (i=1; i<nsum; i++) {
          xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
          for (k=0; k<N; k++)
            zd_dev[k] += c[i] * xd_dev[k];
        }
      }
    }
    free(xd_dev_ptrs);
    free(zd_dev_ptrs);
    return(0);
  }

  /*
   * Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nvec-1   (out of place)
   */
#pragma omp target map(to:N,nvec,c[:nsum],xd_dev_ptrs[:nvec*nsum],zd_dev_ptrs[:nvec]) \
  is_device_ptr(zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (j=0; j<nvec; j++) {
      /* scale first vector in the sum into the output vector */
      xd_dev = xd_dev_ptrs[j*nsum];
      zd_dev = zd_dev_ptrs[j];
#pragma omp parallel for schedule(static, 1)
      for (k=0; k<N; k++)
        zd_dev[k] = c[0] * xd_dev[k];
      /* scale and sum remaining vectors into the output vector */
      for (i=1; i<nsum; i++) {
        xd_dev = xd_dev_ptrs[j*nsum+i];
#pragma omp parallel for schedule(static, 1)
        for (k=0; k<N; k++)
          zd_dev[k] += c[i] * xd_dev[k];
      }
    }
  }

  free(xd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/*
* -----------------------------------------------------------------
* private functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Copy vector components into a second vector
*/
/* Device-side elementwise copy: z[i] = x[i]. */
static void VCopy_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype k, len;
  realtype *src, *dst;
  int dev_id;

  len = NV_LENGTH_OMPDEV(x);
  src = NV_DATA_DEV_OMPDEV(x);
  dst = NV_DATA_DEV_OMPDEV(z);

  /* offload the copy loop to the default device */
  dev_id = omp_get_default_device();

#pragma omp target map(to:len) is_device_ptr(src, dst) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    dst[k] = src[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum
*/
/* Device-side elementwise sum: z[i] = x[i] + y[i]. */
static void VSum_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xdata, *ydata, *zdata;
  int dev_id;

  len   = NV_LENGTH_OMPDEV(x);
  xdata = NV_DATA_DEV_OMPDEV(x);
  ydata = NV_DATA_DEV_OMPDEV(y);
  zdata = NV_DATA_DEV_OMPDEV(z);

  /* offload to the default device */
  dev_id = omp_get_default_device();

#pragma omp target map(to:len) is_device_ptr(xdata, ydata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    zdata[k] = xdata[k] + ydata[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference
*/
/* Device-side elementwise difference: z[i] = x[i] - y[i]. */
static void VDiff_OpenMPDEV(N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xdata, *ydata, *zdata;
  int dev_id;

  len   = NV_LENGTH_OMPDEV(x);
  xdata = NV_DATA_DEV_OMPDEV(x);
  ydata = NV_DATA_DEV_OMPDEV(y);
  zdata = NV_DATA_DEV_OMPDEV(z);

  /* offload to the default device */
  dev_id = omp_get_default_device();

#pragma omp target map(to:len) is_device_ptr(xdata, ydata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    zdata[k] = xdata[k] - ydata[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute the negative of a vector
*/
/* Device-side elementwise negation: z[i] = -x[i]. */
static void VNeg_OpenMPDEV(N_Vector x, N_Vector z)
{
  sunindextype k, len;
  realtype *xdata, *zdata;
  int dev_id;

  len   = NV_LENGTH_OMPDEV(x);
  xdata = NV_DATA_DEV_OMPDEV(x);
  zdata = NV_DATA_DEV_OMPDEV(z);

  /* offload to the default device */
  dev_id = omp_get_default_device();

#pragma omp target map(to:len) is_device_ptr(xdata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    zdata[k] = -xdata[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector sum
*/
/* Device-side scaled sum: z[i] = c*(x[i] + y[i]). */
static void VScaleSum_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xdata, *ydata, *zdata;
  int dev_id;

  len   = NV_LENGTH_OMPDEV(x);
  xdata = NV_DATA_DEV_OMPDEV(x);
  ydata = NV_DATA_DEV_OMPDEV(y);
  zdata = NV_DATA_DEV_OMPDEV(z);

  /* offload to the default device */
  dev_id = omp_get_default_device();

#pragma omp target map(to:len,c) is_device_ptr(xdata, ydata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    zdata[k] = c*(xdata[k] + ydata[k]);

  return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector difference
*/
/* Device-side scaled difference: z[i] = c*(x[i] - y[i]). */
static void VScaleDiff_OpenMPDEV(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xdata, *ydata, *zdata;
  int dev_id;

  len   = NV_LENGTH_OMPDEV(x);
  xdata = NV_DATA_DEV_OMPDEV(x);
  ydata = NV_DATA_DEV_OMPDEV(y);
  zdata = NV_DATA_DEV_OMPDEV(z);

  /* offload to the default device */
  dev_id = omp_get_default_device();

#pragma omp target map(to:len,c) is_device_ptr(xdata, ydata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    zdata[k] = c*(xdata[k] - ydata[k]);

  return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum z[i] = a*x[i]+y[i]
*/
/* Device-side linear combination: z[i] = a*x[i] + y[i]. */
static void VLin1_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xdata, *ydata, *zdata;
  int dev_id;

  len   = NV_LENGTH_OMPDEV(x);
  xdata = NV_DATA_DEV_OMPDEV(x);
  ydata = NV_DATA_DEV_OMPDEV(y);
  zdata = NV_DATA_DEV_OMPDEV(z);

  /* offload to the default device */
  dev_id = omp_get_default_device();

#pragma omp target map(to:len,a) is_device_ptr(xdata, ydata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    zdata[k] = (a*xdata[k]) + ydata[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference z[i] = a*x[i]-y[i]
*/
/* Device-side linear combination: z[i] = a*x[i] - y[i]. */
static void VLin2_OpenMPDEV(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
  sunindextype k, len;
  realtype *xdata, *ydata, *zdata;
  int dev_id;

  len   = NV_LENGTH_OMPDEV(x);
  xdata = NV_DATA_DEV_OMPDEV(x);
  ydata = NV_DATA_DEV_OMPDEV(y);
  zdata = NV_DATA_DEV_OMPDEV(z);

  /* offload to the default device */
  dev_id = omp_get_default_device();

#pragma omp target map(to:len,a) is_device_ptr(xdata, ydata, zdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    zdata[k] = (a*xdata[k]) - ydata[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute special cases of linear sum
*/
/* Device-side axpy update y <- a*x + y, with fast paths for a == 1 and
 * a == -1 that avoid mapping the scalar. */
static void Vaxpy_OpenMPDEV(realtype a, N_Vector x, N_Vector y)
{
  sunindextype k, len;
  realtype *xdata, *ydata;
  int dev_id;

  len   = NV_LENGTH_OMPDEV(x);
  xdata = NV_DATA_DEV_OMPDEV(x);
  ydata = NV_DATA_DEV_OMPDEV(y);

  /* offload to the default device */
  dev_id = omp_get_default_device();

  if (a == ONE) {
    /* y += x */
#pragma omp target map(to:len) is_device_ptr(xdata, ydata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (k = 0; k < len; k++)
      ydata[k] += xdata[k];
    return;
  }

  if (a == -ONE) {
    /* y -= x */
#pragma omp target map(to:len) is_device_ptr(xdata, ydata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
    for (k = 0; k < len; k++)
      ydata[k] -= xdata[k];
    return;
  }

  /* general scale factor */
#pragma omp target map(to:len,a) is_device_ptr(xdata, ydata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    ydata[k] += a*xdata[k];

  return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector x[i] = a*x[i]
*/
/* Device-side in-place scaling: x[i] *= a. */
static void VScaleBy_OpenMPDEV(realtype a, N_Vector x)
{
  sunindextype k, len;
  realtype *xdata;
  int dev_id;

  len   = NV_LENGTH_OMPDEV(x);
  xdata = NV_DATA_DEV_OMPDEV(x);

  /* offload to the default device */
  dev_id = omp_get_default_device();

#pragma omp target map(to:len,a) is_device_ptr(xdata) device(dev_id)
#pragma omp teams distribute parallel for schedule(static, 1)
  for (k = 0; k < len; k++)
    xdata[k] *= a;

  return;
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector array operations
* -----------------------------------------------------------------
*/
/*
 * Computes Z[i] = X[i] + Y[i] for each vector pair in the arrays on the
 * device.  Assumes nvec >= 1; the public entry points validate nvec before
 * dispatching here.  Always returns 0.
 */
static int VSumVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;

  N = NV_LENGTH_OMPDEV(X[0]);

  /* get default device identifier */
  dev = omp_get_default_device();

  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);

  /* one distribute iteration per vector, threaded inner element loop */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev, yd_dev, zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = xd_dev[j] + yd_dev[j];
    }
  }

  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Z[k][j] = X[k][j] - Y[k][j] for k = 0..nvec-1, on the offload device.
 * Same structure as VSumVectorArray_OpenMPDEV.  Always returns 0. */
static int VDiffVectorArray_OpenMPDEV(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;
  /* all vectors are assumed to share the length of X[0] */
  N = NV_LENGTH_OMPDEV(X[0]);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    /* one team per vector; team threads split the element loop */
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = xd_dev[j] - yd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Z[k][j] = c * (X[k][j] + Y[k][j]) for k = 0..nvec-1, on the offload
 * device.  Same structure as VSumVectorArray_OpenMPDEV; note the scalar
 * c is NOT mapped in the target clause below (it is captured by the
 * implicit data-mapping rules).  Always returns 0. */
static int VScaleSumVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;
  N = NV_LENGTH_OMPDEV(X[0]);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c * (xd_dev[j] + yd_dev[j]);
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Z[k][j] = c * (X[k][j] - Y[k][j]) for k = 0..nvec-1, on the offload
 * device.  Same structure as VScaleSumVectorArray_OpenMPDEV.
 * Always returns 0. */
static int VScaleDiffVectorArray_OpenMPDEV(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;
  N = NV_LENGTH_OMPDEV(X[0]);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = c * (xd_dev[j] - yd_dev[j]);
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Z[k][j] = a * X[k][j] + Y[k][j] for k = 0..nvec-1, on the offload
 * device.  Same structure as VSumVectorArray_OpenMPDEV.  Always
 * returns 0. */
static int VLin1VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;
  N = NV_LENGTH_OMPDEV(X[0]);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = (a * xd_dev[j]) + yd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Z[k][j] = a * X[k][j] - Y[k][j] for k = 0..nvec-1, on the offload
 * device.  Same structure as VLin1VectorArray_OpenMPDEV with the sign
 * of Y negated.  Always returns 0. */
static int VLin2VectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype* zd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  realtype** zd_dev_ptrs=NULL;
  N = NV_LENGTH_OMPDEV(X[0]);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  zd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  for (i=0; i<nvec; i++)
    zd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Z[i]);
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec],zd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev,zd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
      zd_dev = zd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        zd_dev[j] = (a * xd_dev[j]) - yd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  free(zd_dev_ptrs);
  return(0);
}
/* Y[k][j] += a * X[k][j] for k = 0..nvec-1, on the offload device.
 * Specialized fast paths avoid the multiply when a == 1 or a == -1;
 * each path frees the host pointer arrays before returning.
 * Always returns 0. */
static int VaxpyVectorArray_OpenMPDEV(int nvec, realtype a, N_Vector* X, N_Vector* Y)
{
  int i, dev;
  sunindextype j, N;
  realtype* xd_dev=NULL;
  realtype* yd_dev=NULL;
  realtype** xd_dev_ptrs=NULL;
  realtype** yd_dev_ptrs=NULL;
  N = NV_LENGTH_OMPDEV(X[0]);
  /* get default device identifier */
  dev = omp_get_default_device();
  /* Allocate and store dev pointers to copy to device */
  xd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  yd_dev_ptrs = (realtype**) malloc(nvec * sizeof(realtype*));
  for (i=0; i<nvec; i++)
    xd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(X[i]);
  for (i=0; i<nvec; i++)
    yd_dev_ptrs[i] = NV_DATA_DEV_OMPDEV(Y[i]);
  /* special case: a == 1, plain accumulation */
  if (a == ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
        yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          yd_dev[j] += xd_dev[j];
      }
    }
    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }
  /* special case: a == -1, plain subtraction */
  if (a == -ONE) {
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
    {
      for (i=0; i<nvec; i++) {
        xd_dev = xd_dev_ptrs[i];
        yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
        for (j=0; j<N; j++)
          yd_dev[j] -= xd_dev[j];
      }
    }
    free(xd_dev_ptrs);
    free(yd_dev_ptrs);
    return(0);
  }
  /* general case: y += a*x */
#pragma omp target map(to:N,xd_dev_ptrs[:nvec],yd_dev_ptrs[:nvec]) \
  is_device_ptr(xd_dev,yd_dev) device(dev)
#pragma omp teams distribute
  {
    for (i=0; i<nvec; i++) {
      xd_dev = xd_dev_ptrs[i];
      yd_dev = yd_dev_ptrs[i];
#pragma omp parallel for schedule(static, 1)
      for (j=0; j<N; j++)
        yd_dev[j] += a * xd_dev[j];
    }
  }
  free(xd_dev_ptrs);
  free(yd_dev_ptrs);
  return(0);
}
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
int N_VEnableFusedOps_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* Switch every fused and vector-array operation on (tf true) or off
     (tf false) in the vector's ops table.  Returns 0 on success, -1 if
     the vector or its ops table is NULL. */
  if (v == NULL || v->ops == NULL) return(-1);
  /* fused vector operations */
  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMPDEV : NULL;
  v->ops->nvscaleaddmulti     = tf ? N_VScaleAddMulti_OpenMPDEV     : NULL;
  v->ops->nvdotprodmulti      = tf ? N_VDotProdMulti_OpenMPDEV      : NULL;
  /* vector array operations */
  v->ops->nvlinearsumvectorarray         = tf ? N_VLinearSumVectorArray_OpenMPDEV         : NULL;
  v->ops->nvscalevectorarray             = tf ? N_VScaleVectorArray_OpenMPDEV             : NULL;
  v->ops->nvconstvectorarray             = tf ? N_VConstVectorArray_OpenMPDEV             : NULL;
  v->ops->nvwrmsnormvectorarray          = tf ? N_VWrmsNormVectorArray_OpenMPDEV          : NULL;
  v->ops->nvwrmsnormmaskvectorarray      = tf ? N_VWrmsNormMaskVectorArray_OpenMPDEV      : NULL;
  v->ops->nvscaleaddmultivectorarray     = tf ? N_VScaleAddMultiVectorArray_OpenMPDEV     : NULL;
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMPDEV : NULL;
  return(0);
}
int N_VEnableLinearCombination_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* Attach (tf true) or detach (tf false) the fused linear-combination
     operation.  Returns 0 on success, -1 if v or v->ops is NULL. */
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvlinearcombination = tf ? N_VLinearCombination_OpenMPDEV : NULL;
  return(0);
}
int N_VEnableScaleAddMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* Attach (tf true) or detach (tf false) the fused scale-add-multi
     operation.  Returns 0 on success, -1 if v or v->ops is NULL. */
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvscaleaddmulti = tf ? N_VScaleAddMulti_OpenMPDEV : NULL;
  return(0);
}
int N_VEnableDotProdMulti_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* Attach (tf true) or detach (tf false) the fused multi-dot-product
     operation.  Returns 0 on success, -1 if v or v->ops is NULL. */
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvdotprodmulti = tf ? N_VDotProdMulti_OpenMPDEV : NULL;
  return(0);
}
int N_VEnableLinearSumVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* Attach (tf true) or detach (tf false) the linear-sum vector-array
     operation.  Returns 0 on success, -1 if v or v->ops is NULL. */
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvlinearsumvectorarray = tf ? N_VLinearSumVectorArray_OpenMPDEV : NULL;
  return(0);
}
int N_VEnableScaleVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* Attach (tf true) or detach (tf false) the scale vector-array
     operation.  Returns 0 on success, -1 if v or v->ops is NULL. */
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvscalevectorarray = tf ? N_VScaleVectorArray_OpenMPDEV : NULL;
  return(0);
}
int N_VEnableConstVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* Attach (tf true) or detach (tf false) the const vector-array
     operation.  Returns 0 on success, -1 if v or v->ops is NULL. */
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvconstvectorarray = tf ? N_VConstVectorArray_OpenMPDEV : NULL;
  return(0);
}
int N_VEnableWrmsNormVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* Attach (tf true) or detach (tf false) the WRMS-norm vector-array
     operation.  Returns 0 on success, -1 if v or v->ops is NULL. */
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvwrmsnormvectorarray = tf ? N_VWrmsNormVectorArray_OpenMPDEV : NULL;
  return(0);
}
int N_VEnableWrmsNormMaskVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* Attach (tf true) or detach (tf false) the masked WRMS-norm
     vector-array operation.  Returns 0 on success, -1 if v or v->ops
     is NULL. */
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvwrmsnormmaskvectorarray = tf ? N_VWrmsNormMaskVectorArray_OpenMPDEV : NULL;
  return(0);
}
int N_VEnableScaleAddMultiVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* Attach (tf true) or detach (tf false) the scale-add-multi
     vector-array operation.  Returns 0 on success, -1 if v or v->ops
     is NULL. */
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvscaleaddmultivectorarray = tf ? N_VScaleAddMultiVectorArray_OpenMPDEV : NULL;
  return(0);
}
int N_VEnableLinearCombinationVectorArray_OpenMPDEV(N_Vector v, booleantype tf)
{
  /* Attach (tf true) or detach (tf false) the linear-combination
     vector-array operation.  Returns 0 on success, -1 if v or v->ops
     is NULL. */
  if (v == NULL || v->ops == NULL) return(-1);
  v->ops->nvlinearcombinationvectorarray = tf ? N_VLinearCombinationVectorArray_OpenMPDEV : NULL;
  return(0);
}
|
omp_coarse.h | /*
Copyright 2014, Jernej Kovacic
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* @file
* @author Jernej Kovacic
*
* Useful inline functions that facilitate
* application of coarse-grained parallelism.
*/
#ifndef _OMP_COARSE_H_
#define _OMP_COARSE_H_
#include <cstddef>
#include "../settings/omp_settings.h"
/**
* Ideal number of threads (if permitted by the number of
* available CPU cores) to perform a coarse-grained
* parallelized task
*
* @param n - number of all elements
* @param per_thread - ideal number of elements to be processed by a single thread
*
* @return ideal number of threads
*/
inline std::size_t ompIdealNrThreads( const std::size_t n, const std::size_t per_thread )
{
    // Ceiling division without forming n + per_thread (which could
    // overflow): take the whole quotient, add one if a remainder exists.
    const std::size_t full_chunks = n / per_thread;
    return ( n % per_thread != 0 ) ? full_chunks + 1 : full_chunks;
}
/**
* Same as 'ompIdealNrThreads', assumes the application default value
* for the ideal number of elements to be processed by a single thread
* (OMP_CHUNKS_PER_THREAD, defined in settings/omp_settings.h).
*
* @param n - number of all elements
*
* @return ideal number of threads for the default number of elements per thread (OMP_CHUNKS_PER_THREAD)
*
* @see ompIdealNrThreads
*/
inline std::size_t ompIdeal( const std::size_t n )
{
    // Delegate with the application-wide default chunk size
    // (OMP_CHUNKS_PER_THREAD, from settings/omp_settings.h).
    return ompIdealNrThreads(n, OMP_CHUNKS_PER_THREAD);
}
/**
* A convenience macro (unfortunately C and C++ do not allow a more elegant
* way to accomplish this) that initializes necessary variables
* used at coarse level parallelization.
*
* It declares (as "const std::size_t") and initializes the following variables
* that can be further used by parallelized algorithms:
* - thrnr: number of the current thread
* - nthreads: number of all allocated threads
* - elems_per_thread: maximum number of elements processed by a thread
* - istart: index to the first element of the range, processed by the current thread
* - iend: index to the final element of the range, processed by the current thread
*
* The macro can only be used within the "#pragma omp parallel" blocks
* that actually implement coarse grained parallelization!
*
* The following headers are assumed to be included beforehand:
* - #include <cstddef> (for the definition of size_t)
* - #include <algorithm> (for the definition of std::min)
* - #include "omp/omp_header.h" (for the definition of omp_get_thread_num and omp_get_num_threads)
*
* @param N - number of all elements to be processed by threads (must be a size_t value)
*/
#define OMP_COARSE_GRAINED_PAR_INIT_VARS( N ) \
const std::size_t thrnr = omp_get_thread_num(); \
const std::size_t nthreads = omp_get_num_threads(); \
\
const std::size_t elems_per_thread = \
( std::min<std::size_t>( (N), static_cast<std::size_t>(-1) - nthreads + 1 ) \
+ nthreads - 1) / nthreads; \
\
const std::size_t istart = elems_per_thread * thrnr; \
const std::size_t iend = \
std::min<std::size_t>( ( N ), \
( elems_per_thread <= (static_cast<std::size_t>(-1)-istart) ? \
istart + elems_per_thread : \
static_cast<std::size_t>(-1) ) );
#endif /* _OMP_COARSE_H_ */
|
GB_binop__isge_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_08__isge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__isge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_04__isge_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isge_fp64)
// A*D function (colscale): GB (_AxD__isge_fp64)
// D*A function (rowscale): GB (_DxB__isge_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__isge_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__isge_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isge_fp64)
// C=scalar+B GB (_bind1st__isge_fp64)
// C=scalar+B' GB (_bind1st_tran__isge_fp64)
// C=A+scalar GB (_bind2nd__isge_fp64)
// C=A'+scalar GB (_bind2nd_tran__isge_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x >= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISGE || GxB_NO_FP64 || GxB_NO_ISGE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; the computation comes from the
// shared template.  Returns GrB_NO_VALUE when this operator is disabled at
// compile time (GB_DISABLE), GrB_SUCCESS otherwise.
GrB_Info GB (_Cdense_ewise3_noaccum__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, using the
// pre-sliced task decomposition in B_ek_slicing.  Returns GrB_NO_VALUE
// when this operator is disabled at compile time, GrB_SUCCESS otherwise.
GrB_Info GB (_Cdense_accumB__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
//
// Parameters:
//   C       : dense matrix updated in place
//   p_bwork : pointer to the scalar b, typed as GB_void and cast to double
//   nthreads: number of threads used by the template
//
// Returns GrB_NO_VALUE when this operator is disabled at compile time
// (GB_DISABLE), GrB_SUCCESS otherwise.
//
// Fix: the original body had a second, unreachable "return (GrB_SUCCESS) ;"
// after the brace-enclosed block that already returns; it has been removed.
GrB_Info GB (_Cdense_accumb__isge_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D, writing the
// result values into C->x via the colscale template.  Returns GrB_NO_VALUE
// when disabled at compile time, GrB_SUCCESS otherwise.
GrB_Info GB (_AxD__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D, writing the result
// values into C->x via the rowscale template.  Returns GrB_NO_VALUE when
// disabled at compile time, GrB_SUCCESS otherwise.
GrB_Info GB (_DxB__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, with the optional mask M
// applied structurally (Mask_struct) and/or complemented (Mask_comp).  The
// per-matrix ek_slicing workspaces are declared here and released by
// GB_FREE_WORK; the numeric work is in the shared add template.  Returns
// GrB_NO_VALUE when disabled at compile time, GrB_SUCCESS otherwise.
GrB_Info GB (_AaddB__isge_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B, where C is
// sparse or hypersparse; the numeric work is in the shared meta template.
// Returns GrB_NO_VALUE when disabled at compile time, GrB_SUCCESS otherwise.
GrB_Info GB (_AemultB_08__isge_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hypersparse and B is
// bitmap/full.  GB_FLIPPED selects whether the template evaluates f(x,y) or
// f(y,x); for this operator GB_BINOP_FLIP is 0, so only the unflipped branch
// below is compiled.  Returns GrB_NO_VALUE when disabled at compile time,
// GrB_SUCCESS otherwise.
GrB_Info GB (_AemultB_02__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hypersparse and both A
// and B bitmap/full; the numeric work is in the shared template.  Returns
// GrB_NO_VALUE when disabled at compile time, GrB_SUCCESS otherwise.
GrB_Info GB (_AemultB_04__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult (bitmap): C = A.*B, C<M> = A.*B, or C<!M> = A.*B, where the
// result C is a bitmap matrix; the numeric work is in the shared template.
// Returns GrB_NO_VALUE when disabled at compile time, GrB_SUCCESS otherwise.
GrB_Info GB (_AemultB_bitmap__isge_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x >= Bx [p]) for every entry present in B, with the scalar
// bound to the first operand.  Entries absent from the bitmap Bb are
// skipped.  Returns GrB_NO_VALUE when disabled, GrB_SUCCESS otherwise.
GrB_Info GB (_bind1st__isge_fp64)
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Bx = (double *) Bx_input ;
    double x = (*((double *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only entries present in the bitmap contribute
        if (GBB (Bb, p))
        {
            double bval = GBX (Bx, p, false) ;
            Cx [p] = (x >= bval) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] >= y) for every entry present in A, with the scalar
// bound to the second operand.  Entries absent from the bitmap Ab are
// skipped.  Returns GrB_NO_VALUE when disabled, GrB_SUCCESS otherwise.
GrB_Info GB (_bind2nd__isge_fp64)
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only entries present in the bitmap contribute
        if (GBB (Ab, p))
        {
            double aval = GBX (Ax, p, false) ;
            Cx [p] = (aval >= y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar bound
// to the first operand; the transpose loop is in GB_unop_transpose.c and
// uses the GB_CAST_OP macro defined just above.  Returns GrB_NO_VALUE when
// disabled at compile time, GrB_SUCCESS otherwise.
GrB_Info GB (_bind1st_tran__isge_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows in this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
    double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar bound
// to the second operand; the transpose loop is in GB_unop_transpose.c and
// uses the GB_CAST_OP macro defined just above.  Returns GrB_NO_VALUE when
// disabled at compile time, GrB_SUCCESS otherwise.
GrB_Info GB (_bind2nd_tran__isge_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_unaryop__minv_uint8_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_uint8_int8
// op(A') function: GB_tran__minv_uint8_int8
// C type: uint8_t
// A type: int8_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = GB_IMINV_UNSIGNED (aij, 8)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_UNSIGNED (x, 8) ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_UINT8 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = minv ((uint8_t) Ax [p]) for all p, applied element-wise in
// parallel.  Cx and Ax may be aliased since each entry is read once and
// written once at the same index.
GrB_Info GB_unop__minv_uint8_int8
(
    uint8_t *Cx,        // Cx and Ax may be aliased
    int8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // fetch aij = Ax [p], typecast int8_t -> uint8_t, apply MINV
        int8_t aij = Ax [p] ;
        uint8_t z = (uint8_t) aij ;
        Cx [p] = GB_IMINV_UNSIGNED (z, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast int8_t -> uint8_t, apply MINV.
GrB_Info GB_tran__minv_uint8_int8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts workspace
    GBI_single_iterator Iter,           // iterator over the slices of A
    const int64_t *GB_RESTRICT A_slice, // how A is sliced across tasks
    int naslice                         // number of slices
)
{
    #if GB_DISABLE
    // this operator/type combination is compiled out; use the generic path
    return (GrB_NO_VALUE) ;
    #else
    // phase 2 of the transpose: fill C with op (cast (aij)) per entry
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
rose_Stress-1.c | //#include <float.h>
//#include <math.h>
#define MIN(a, b) ( (a < b) ? a : b)
#define MAX(a, b) ( (a > b) ? a : b)
#include <omp.h>
typedef double real8;
/*
 * Zero the new stress components of any zone whose accumulated plastic
 * strain exceeds the failure threshold, and bump that zone's eps slightly
 * past the threshold so the zone stays marked as failed.
 *
 * newS*/ /* and newT* are indexed directly by i; eps is indexed indirectly
 * through zoneset[i].  Fix: the original computed `index = zoneset[i]` but
 * never used it, re-reading zoneset[i] on every access; use the hoisted
 * index instead.
 */
void StressCheckEpsFail(real8 *newSxx,real8 *newSyy,real8 *newSzz,real8 *newTxy,real8 *newTxz,real8 *newTyz,real8 *eps,real8 eps_failure_model,const int *zoneset,int length)
{
  int i;
  int index;
#pragma omp parallel for private (index,i) firstprivate (eps_failure_model,length)
  for (i = 0; i <= length - 1; i += 1) {
    index = zoneset[i];
    if (eps[index] > eps_failure_model) {
      /* zone failed: zero the updated stress deviators */
      newSxx[i] = 0.0;
      newSyy[i] = 0.0;
      newSzz[i] = 0.0;
      newTxy[i] = 0.0;
      newTxz[i] = 0.0;
      newTyz[i] = 0.0;
      /* pin eps just above the threshold so the failure persists */
      eps[index] = eps_failure_model * 1.01;
    }
  }
}
/*
 * Accumulate the incremental stress-strain work terms for each zone:
 *   deltz[zone] += 0.25*dt*(vnewc+vc) * D : (S_old + S_new)
 *   delts[i]    += 0.25*dt*(vnewc+vc) * D : S_old
 * where szz is reconstructed from the traceless deviator (szz = -sxx-syy).
 *
 * Fix: the original computed `index = zoneset[i]` but never used it,
 * re-reading zoneset[i] dozens of times per iteration; all indirect
 * accesses now go through the hoisted index.  Note deltz is updated
 * indirectly through zoneset, so duplicate zone ids in zoneset would race
 * under OpenMP -- presumably zoneset entries are unique; verify at callers.
 */
void StressStrainWork(real8 *deltz,real8 *delts,const real8 *newSxx,const real8 *newSyy,const real8 *newSzz,const real8 *newTxy,const real8 *newTxz,const real8 *newTyz,const real8 *sxx,const real8 *syy,const real8 *txy,const real8 *txz,const real8 *tyz,const real8 *dxx,const real8 *dyy,const real8 *dzz,const real8 *dxy,const real8 *dxz,const real8 *dyz,real8 deltaTime,const int *zoneset,const real8 *vc,const real8 *vnewc,int length)
{
  int i;
  int index;
  real8 quarterDelta = 0.25 * deltaTime;
  real8 szz;
#pragma omp parallel for private (index,szz,i) firstprivate (length,quarterDelta)
  for (i = 0; i <= length - 1; i += 1) {
    index = zoneset[i];
    /* deviatoric stresses are traceless: szz = -(sxx + syy) */
    szz = -sxx[index] - syy[index];
    deltz[index] += quarterDelta * (vnewc[i] + vc[i]) * (dxx[index] * (sxx[index] + newSxx[i]) + dyy[index] * (syy[index] + newSyy[i]) + dzz[index] * (szz + newSzz[i]) + 2. * dxy[index] * (txy[index] + newTxy[i]) + 2. * dxz[index] * (txz[index] + newTxz[i]) + 2. * dyz[index] * (tyz[index] + newTyz[i]));
    delts[i] += quarterDelta * (vnewc[i] + vc[i]) * (dxx[index] * sxx[index] + dyy[index] * syy[index] + dzz[index] * szz + 2. * dxy[index] * txy[index] + 2. * dxz[index] * txz[index] + 2. * dyz[index] * tyz[index]);
  }
}
/*
 * Split the stress-strain work deltz into a heat part (deltzh) and a
 * residual part (deltrh) per zone, then rescale deltrh by the shear-modulus
 * derivative over the averaged modulus.  The two branches differ only in
 * how the old-stress contribution is weighted (1/(2*shearMod) when the
 * modulus is positive, otherwise the shearRatio-based weight alone).
 *
 * Fix: the original computed `nz = zoneset[i]` but never used it,
 * re-reading zoneset[i] on every indirect access; all such accesses now go
 * through nz.  deltaTime is intentionally unused (kept for a uniform
 * interface); the self-assignment silences the unused-argument warning.
 */
void StressStrainHeat(const real8 *deltz,real8 *deltzh,real8 *deltrh,const real8 *shearMod,const real8 *shearRatio,const real8 *shearDer,const real8 *newSxx,const real8 *newSyy,const real8 *newSzz,const real8 *newTxy,const real8 *newTxz,const real8 *newTyz,const real8 *sxx,const real8 *syy,const real8 *txy,const real8 *txz,const real8 *tyz,real8 deltaTime,const int *zoneset,const real8 *vc,const real8 *vnewc,int length)
{
  real8 shearr;
  real8 sheari;
  real8 avgMod;
  int nz;
  int i;
  deltaTime = deltaTime;
/* Quiet the compiler - unused argument */
#pragma omp parallel for private (shearr,sheari,avgMod,nz,i) firstprivate (length)
  for (i = 0; i <= length - 1; i += 1) {
    nz = zoneset[i];
    shearr = 0.5 * shearRatio[i];
    if (shearMod[nz] > 0.) {
      sheari = 0.5 / shearMod[nz];
      deltrh[nz] = .25 * (vnewc[i] + vc[i]) * ((newSxx[i] * sheari - sxx[nz] * shearr) * (sxx[nz] + newSxx[i]) + (newSyy[i] * sheari - syy[nz] * shearr) * (syy[nz] + newSyy[i]) + (newSzz[i] * sheari + (syy[nz] + sxx[nz]) * shearr) * (newSzz[i] - sxx[nz] - syy[nz]) + 2. * (newTxy[i] * sheari - txy[nz] * shearr) * (txy[nz] + newTxy[i]) + 2. * (newTxz[i] * sheari - txz[nz] * shearr) * (txz[nz] + newTxz[i]) + 2. * (newTyz[i] * sheari - tyz[nz] * shearr) * (tyz[nz] + newTyz[i]));
    }
    else {
      /* non-positive modulus: only the old-stress weight applies */
      deltrh[nz] = - .25 * (vnewc[i] + vc[i]) * (sxx[nz] * (sxx[nz] + newSxx[i]) + syy[nz] * (syy[nz] + newSyy[i]) - (syy[nz] + sxx[nz]) * (newSzz[i] - sxx[nz] - syy[nz]) + 2. * txy[nz] * (txy[nz] + newTxy[i]) + 2. * txz[nz] * (txz[nz] + newTxz[i]) + 2. * tyz[nz] * (tyz[nz] + newTyz[i])) * shearr;
    }
    /* heat part is whatever work is not in the residual */
    deltzh[nz] = deltz[nz] - deltrh[nz];
    avgMod = 0.5 * shearMod[nz];
    if (shearRatio[i] > 0.0)
      avgMod = avgMod + 0.5 / shearRatio[i];
    if (avgMod > 0.0)
      deltrh[nz] = shearDer[i] * deltrh[nz] / avgMod;
    else
      deltrh[nz] = 0.0;
  }
}
|
GB_binop__isne_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__isne_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_int64)
// A*D function (colscale): GB (_AxD__isne_int64)
// D*A function (rowscale): GB (_DxB__isne_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_int64)
// C=scalar+B GB (_bind1st__isne_int64)
// C=scalar+B' GB (_bind1st_tran__isne_int64)
// C=A+scalar GB (_bind2nd__isne_int64)
// C=A'+scalar GB (_bind2nd_tran__isne_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x != y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISNE || GxB_NO_INT64 || GxB_NO_ISNE_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// This fused C += A+B dense kernel is not generated for the ISNE operator:
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense: Cx [p] = (Ax [p] != Bx [p]).
GrB_Info GB (_Cdense_ewise3_noaccum__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination is compiled out; use the generic path
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B where C is dense and B is sparse: accumulate each entry of B
// into C via the ISNE operator.
GrB_Info GB (_Cdense_accumB__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // the template iterates over the entries of B, per slice/task
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b where C is dense and b is a scalar: Cx [p] = (Cx [p] != b) for
// all entries of C.
//
// Fix: the original returned GrB_SUCCESS both inside the inner scope and
// again after it; the second return was unreachable.  Keep the single
// return after the scope, matching the _Cdense_accumB pattern above.
GrB_Info GB (_Cdense_accumb__isne_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // the scalar b, passed untyped
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal matrix D: C has the pattern of A,
// with Cx computed entry-by-entry by the template.
GrB_Info GB (_AxD__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // only the values of C are computed here; its pattern is A's
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal matrix D: C has the pattern of B,
// with Cx computed entry-by-entry by the template.
GrB_Info GB (_DxB__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // only the values of C are computed here; its pattern is B's
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where "+" is the ISNE operator applied
// where A and B intersect, and entries from A or B alone are copied.
GrB_Info GB (_AaddB__isne_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,                 // optional mask (may be NULL)
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, general case: the ISNE operator is
// applied only where the patterns of A and B intersect.
GrB_Info GB (_AemultB_01__isne_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,                 // optional mask (may be NULL)
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full.
// flipxy handling is compiled per-operator via GB_BINOP_FLIP; ISNE is
// commutative, so GB_BINOP_FLIP is 0 and the single unflipped path is used.
GrB_Info GB (_AemultB_02__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,                 // optional mask (may be NULL)
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full; the mask drives the iteration.
GrB_Info GB (_AemultB_03__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is bitmap.
GrB_Info GB (_AemultB_bitmap__isne_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,                 // optional mask (may be NULL)
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for every entry present in B, with the scalar x
// bound as the first operand.  Entries absent from the bitmap Bb are
// skipped; Cx and Bx may be aliased.
GrB_Info GB (_bind1st__isne_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // unpack the untyped inputs
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the op only where the bitmap says an entry exists
        if (GBB (Bb, k))
        {
            int64_t yij = Bx [k] ;
            Cx [k] = (x != yij) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for every entry present in A, with the scalar y
// bound as the second operand.  Entries absent from the bitmap Ab are
// skipped; Cx and Ax may be aliased.
GrB_Info GB (_bind2nd__isne_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // unpack the untyped inputs
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // apply the op only where the bitmap says an entry exists
        if (GBB (Ab, k))
        {
            int64_t xij = Ax [k] ;
            Cx [k] = (xij != y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij): the "cast" macro name is historical; no typecast occurs.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int64_t aij = Ax [pA] ;             \
    Cx [pC] = (x != aij) ;              \
}

// C = op (x, A'): transpose A and apply z = (x != aij), with the scalar x
// bound as the first operand.
GrB_Info GB (_bind1st_tran__isne_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,             // the scalar x, passed untyped
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,      // workspaces for the transpose
    const int64_t *restrict A_slice,    // how A is sliced across tasks
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this kernel
    #undef GB_ATYPE
    #define GB_ATYPE \
    int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y): the "cast" macro name is historical; no typecast occurs.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    int64_t aij = Ax [pA] ;             \
    Cx [pC] = (aij != y) ;              \
}

// C = op (A', y): transpose A and apply z = (aij != y), with the scalar y
// bound as the second operand.
GrB_Info GB (_bind2nd_tran__isne_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,             // the scalar y, passed untyped
    int64_t *restrict *Workspaces,      // workspaces for the transpose
    const int64_t *restrict A_slice,    // how A is sliced across tasks
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    // the template performs the transpose, applying GB_CAST_OP per entry
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) != 0)
channels++;
}
return(channels == 0 ? (size_t) 1 : channels);
}
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
  const MetricType metric,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *highlight_view,
    *image_view,
    *reconstruct_view;

  const char
    *artifact;

  double
    fuzz;

  Image
    *clone_image,
    *difference_image,
    *highlight_image;

  MagickBooleanType
    status;

  PixelInfo
    highlight,
    lowlight,
    masklight;

  RectangleInfo
    geometry;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Compute the requested distortion metric first; on failure, no
    difference image is produced.
  */
  status=GetImageDistortion(image,reconstruct_image,metric,distortion,
    exception);
  if (status == MagickFalse)
    return((Image *) NULL);
  /*
    The difference canvas is the original image extended to the union of
    the two image sizes, made fully opaque.
  */
  columns=MagickMax(image->columns,reconstruct_image->columns);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  SetGeometry(image,&geometry);
  geometry.width=columns;
  geometry.height=rows;
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
  difference_image=ExtentImage(clone_image,&geometry,exception);
  clone_image=DestroyImage(clone_image);
  if (difference_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
  highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (highlight_image == (Image *) NULL)
    {
      difference_image=DestroyImage(difference_image);
      return((Image *) NULL);
    }
  status=SetImageStorageClass(highlight_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      difference_image=DestroyImage(difference_image);
      highlight_image=DestroyImage(highlight_image);
      return((Image *) NULL);
    }
  (void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
  (void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
  /*
    Annotation colors: defaults below, each overridable via an image
    artifact (compare:highlight-color, compare:lowlight-color,
    compare:masklight-color).
  */
  (void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
  artifact=GetImageArtifact(image,"compare:highlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
  (void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
  artifact=GetImageArtifact(image,"compare:lowlight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
  (void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
  artifact=GetImageArtifact(image,"compare:masklight-color");
  if (artifact != (const char *) NULL)
    (void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
  /*
    Generate difference image.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,highlight_image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    Quantum
      *magick_restrict r;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickStatusType
        difference;

      ssize_t
        i;

      /* masked-out pixels are painted with the masklight color */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          SetPixelViaPixelInfo(highlight_image,&masklight,r);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          r+=GetPixelChannels(highlight_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* alpha is compared directly; color channels are alpha-weighted */
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        if (distance >= fuzz)
          {
            difference=MagickTrue;
            break;
          }
      }
      /* one differing channel is enough to highlight the pixel */
      if (difference == MagickFalse)
        SetPixelViaPixelInfo(highlight_image,&lowlight,r);
      else
        SetPixelViaPixelInfo(highlight_image,&highlight,r);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
      r+=GetPixelChannels(highlight_image);
    }
    sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  highlight_view=DestroyCacheView(highlight_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Compose the highlight overlay onto the difference canvas.
  */
  (void) CompositeImage(difference_image,highlight_image,image->compose,
    MagickTrue,0,0,exception);
  highlight_image=DestroyImage(highlight_image);
  if (status == MagickFalse)
    difference_image=DestroyImage(difference_image);
  return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetAbsoluteDistortion() counts, per channel, the pixels whose squared
  channel difference meets or exceeds the fuzz threshold; the composite
  slot counts pixels differing in any channel.  Masked-out pixels are
  skipped entirely.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    fuzz;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute the absolute difference in pixels between two images.
  */
  status=MagickTrue;
  fuzz=GetFuzzyColorDistance(image,reconstruct_image);
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* per-row tallies, merged into the shared totals under a critical
       section below */
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      MagickBooleanType
        difference;

      ssize_t
        i;

      /* skip masked-out pixels in either image */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      difference=MagickFalse;
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance,
          pixel;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        /* alpha is compared directly; color channels are alpha-weighted */
        if (channel == AlphaPixelChannel)
          pixel=(double) p[i]-GetPixelChannel(reconstruct_image,channel,q);
        else
          pixel=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
        distance=pixel*pixel;
        if (distance >= fuzz)
          {
            channel_distortion[i]++;
            difference=MagickTrue;
          }
      }
      if (difference != MagickFalse)
        channel_distortion[CompositePixelChannel]++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetAbsoluteDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
  GetFuzzDistortion() measures the "fuzz" (normalized mean-squared error)
  between image and reconstruct_image.  Per-channel differences are scaled
  to [0,1] by QuantumScale; color channels are weighted by their pixels'
  alpha values while the alpha channel itself is compared directly.  The
  composite slot of distortion[] receives the RMS over all updated channels.

  Fix: the q NULL check previously cast to (Quantum *) NULL; it now uses
  (const Quantum *) NULL for const-correctness, consistent with every other
  distortion method in this file.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;  /* number of unmasked pixels actually compared */

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row accumulator */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* skip pixels masked out of either image */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;  /* normalize sums of squares to means */
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
  return(status);
}
/*
  GetMeanAbsoluteDistortion() returns the mean absolute error (MAE) between
  image and reconstruct_image: the average of |difference| per channel,
  scaled to [0,1] by QuantumScale.  Color channels are weighted by their
  pixels' alpha values; the alpha channel itself is compared directly.  The
  composite slot of distortion[] holds the mean over all updated channels.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;  /* number of unmasked pixels actually compared */

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row accumulator */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* skip pixels masked out of either image */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        channel_distortion[i]+=distance;
        channel_distortion[CompositePixelChannel]+=distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;  /* normalize sums to means */
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetMeanErrorPerPixel() compares image to reconstruct_image, accumulating
  per-channel absolute error into distortion[] and also recording summary
  statistics (mean error per pixel, normalized mean error, and normalized
  maximum error) in image->error.  Unlike the other metrics it mutates the
  image (the error fields), and the row loop is not parallelized.
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  double
    area,
    maximum_error,
    mean_error;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  area=0.0;
  maximum_error=0.0;
  mean_error=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* skip pixels masked out of either image */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        distortion[i]+=distance;
        distortion[CompositePixelChannel]+=distance;
        mean_error+=distance*distance;
        if (distance > maximum_error)
          maximum_error=distance;
        /* NOTE(review): area counts one per updated channel sample, not per
           pixel -- confirm this is the intended normalization */
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);
  image->error.mean_error_per_pixel=area*distortion[CompositePixelChannel];
  image->error.normalized_mean_error=area*QuantumScale*QuantumScale*mean_error;
  image->error.normalized_maximum_error=QuantumScale*maximum_error;
  return(status);
}
/*
  GetMeanSquaredDistortion() returns the mean squared error (MSE) between
  image and reconstruct_image.  Differences are scaled to [0,1] by
  QuantumScale; color channels are weighted by their pixels' alpha values,
  while the alpha channel itself is compared directly.  The composite slot
  of distortion[] holds the mean over all updated channels.

  Fix: the final division by GetImageChannels() now carries an explicit
  (double) cast, consistent with GetFuzzDistortion()/GetMeanAbsolute-
  Distortion() (the quotient was already computed in double; this makes the
  intent explicit and uniform).
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area;  /* number of unmasked pixels actually compared */

  MagickBooleanType
    status;

  ssize_t
    j;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row accumulator */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* skip pixels masked out of either image */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*(p[i]-GetPixelChannel(reconstruct_image,
            channel,q));
        else
          distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
            channel,q));
        channel_distortion[i]+=distance*distance;
        channel_distortion[CompositePixelChannel]+=distance*distance;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  area=PerceptibleReciprocal(area);
  for (j=0; j <= MaxPixelChannels; j++)
    distortion[j]*=area;  /* normalize sums of squares to means */
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  return(status);
}
/*
  GetNormalizedCrossCorrelationDistortion() returns the normalized cross
  correlation (NCC) between image and reconstruct_image.  Each channel's
  covariance of (pixel - mean) products is accumulated over all unmasked
  pixels, then divided by the product of the two images' per-channel
  standard deviations.  The composite slot receives the RMS of the
  per-channel correlations.  Runs in two serial passes: the first counts
  the unmasked pixels (area), the second accumulates the covariance.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
  const Image *image,const Image *reconstruct_image,double *distortion,
  ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *image_view,
    *reconstruct_view;

  ChannelStatistics
    *image_statistics,
    *reconstruct_statistics;

  double
    area;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Normalize to account for variation due to lighting and exposure condition.
  */
  image_statistics=GetImageStatistics(image,exception);
  reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
  if ((image_statistics == (ChannelStatistics *) NULL) ||
      (reconstruct_statistics == (ChannelStatistics *) NULL))
    {
      /* release whichever of the two allocations succeeded */
      if (image_statistics != (ChannelStatistics *) NULL)
        image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          image_statistics);
      if (reconstruct_statistics != (ChannelStatistics *) NULL)
        reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
          reconstruct_statistics);
      return(MagickFalse);
    }
  status=MagickTrue;
  progress=0;
  for (i=0; i <= MaxPixelChannels; i++)
    distortion[i]=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  area=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  /* pass 1: count unmasked pixels so the covariance can be averaged */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  area=PerceptibleReciprocal(area);
  /* pass 2: accumulate the mean-centered, alpha-weighted cross products */
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          {
            distortion[i]+=area*QuantumScale*(p[i]-
              image_statistics[channel].mean)*(GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
        else
          {
            distortion[i]+=area*QuantumScale*(Sa*p[i]-
              image_statistics[channel].mean)*(Da*GetPixelChannel(
              reconstruct_image,channel,q)-
              reconstruct_statistics[channel].mean);
          }
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

/* NOTE(review): this loop is serial (no omp parallel for above), so the
   atomic is harmless but unnecessary here */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,rows);
        if (proceed == MagickFalse)
          {
            status=MagickFalse;
            break;
          }
      }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /*
    Divide by the standard deviation.
  */
  distortion[CompositePixelChannel]=0.0;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      gamma;

    PixelChannel channel = GetPixelChannelChannel(image,i);
    gamma=image_statistics[channel].standard_deviation*
      reconstruct_statistics[channel].standard_deviation;
    gamma=PerceptibleReciprocal(gamma);
    distortion[i]=QuantumRange*gamma*distortion[i];
    distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
  }
  distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
    GetImageChannels(image));
  /*
    Free resources.
  */
  reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    reconstruct_statistics);
  image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
    image_statistics);
  return(status);
}
/*
  GetPeakAbsoluteDistortion() returns the peak absolute error (PAE): the
  largest [0,1]-scaled absolute channel difference found anywhere in the
  images.  Rows are processed in parallel; each thread tracks its own
  per-channel maxima, merged under a critical section.
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  status=MagickTrue;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row channel maxima */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      j,
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        Da,
        Sa;

      ssize_t
        i;

      /* skip pixels masked out of either image */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      Sa=QuantumScale*GetPixelAlpha(image,p);
      Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        if (channel == AlphaPixelChannel)
          distance=QuantumScale*fabs((double) (p[i]-(double)
            GetPixelChannel(reconstruct_image,channel,q)));
        else
          distance=QuantumScale*fabs((double) (Sa*p[i]-Da*
            GetPixelChannel(reconstruct_image,channel,q)));
        if (distance > channel_distortion[i])
          channel_distortion[i]=distance;
        if (distance > channel_distortion[CompositePixelChannel])
          channel_distortion[CompositePixelChannel]=distance;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      if (channel_distortion[j] > distortion[j])
        distortion[j]=channel_distortion[j];
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
/*
  GetPeakSignalToNoiseRatio() derives PSNR from the mean squared error:
  10*log10(max^2/MSE) with a signal maximum of 1.0.  A (near) zero MSE is
  reported as +infinity.
*/
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (i=0; i <= MaxPixelChannels; i++)
  {
    if (fabs(distortion[i]) < MagickEpsilon)
      {
        distortion[i]=INFINITY;
        continue;
      }
    distortion[i]=10.0*MagickLog10(1.0)-10.0*MagickLog10(distortion[i]);
  }
  return(status);
}
/*
  GetPerceptualHashDistortion() compares the perceptual hashes (image
  moments computed over several colorspaces) of the two images, summing the
  squared coefficient differences per channel; the composite slot receives
  the total over all channels.  Honors the "phash:normalize" artifact.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  ChannelPerceptualHash
    *channel_phash,
    *reconstruct_phash;

  const char
    *artifact;

  MagickBooleanType
    normalize;

  ssize_t
    channel;

  /*
    Compute perceptual hash in the sRGB colorspace.
  */
  channel_phash=GetImagePerceptualHash(image,exception);
  if (channel_phash == (ChannelPerceptualHash *) NULL)
    return(MagickFalse);
  reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
  if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
    {
      channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
        channel_phash);
      return(MagickFalse);
    }
  artifact=GetImageArtifact(image,"phash:normalize");
  normalize=(artifact == (const char *) NULL) ||
    (IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (channel=0; channel < MaxPixelChannels; channel++)
  {
    double
      difference;

    ssize_t
      i;

    difference=0.0;
    for (i=0; i < MaximumNumberOfImageMoments; i++)
    {
      double
        alpha,
        beta;

      ssize_t
        j;

      for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
      {
        alpha=channel_phash[channel].phash[j][i];
        beta=reconstruct_phash[channel].phash[j][i];
        if (normalize == MagickFalse)
          difference+=(beta-alpha)*(beta-alpha);
        else
          /* NOTE(review): this branch assigns rather than accumulates, so
             only the last moment/colorspace contributes when normalizing --
             confirm this is intended */
          difference=sqrt((beta-alpha)*(beta-alpha)/
            channel_phash[0].number_channels);
      }
    }
    /* per-channel slots are disjoint across threads; only the shared
       composite slot needs the critical section below */
    distortion[channel]+=difference;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
    distortion[CompositePixelChannel]+=difference;
  }
  /*
    Free resources.
  */
  reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
    reconstruct_phash);
  channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
  return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion() reports the root mean squared error: the
  square root of each channel's mean squared error.
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=sqrt(distortion[channel]);
  return(status);
}
/*
  GetStructuralSimilarityDistortion() computes the structural similarity
  index (SSIM, https://en.wikipedia.org/wiki/Structural_similarity) between
  image and reconstruct_image.  For each pixel, kernel-weighted local means,
  variances, and covariance are gathered under a Gaussian window and
  combined with the stabilizing constants c1 and c2; the per-channel SSIM is
  averaged over all compared pixels.

  Fixes:
  * removed a stray duplicated memset that re-cleared x_pixel_sigma_squared
    using sizeof(y_pixel_sigma_squared) (a copy/paste typo; redundant since
    both arrays are cleared individually).
  * added reduction(+:area) to the OpenMP pragma: area++ executes inside the
    parallel loop and, unlike the sibling distortion methods, the reduction
    clause was missing, making the final normalization racy.
*/
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius  5.0
#define SSIMSigma  1.5
#define SSIMBlocksize  8
#define SSIMK1  0.01
#define SSIMK2  0.03
#define SSIML  1.0

  CacheView
    *image_view,
    *reconstruct_view;

  char
    geometry[MagickPathExtent];

  const char
    *artifact;

  double
    area,
    c1,
    c2,
    radius,
    sigma;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    columns,
    rows;

  ssize_t
    y;

  /*
    Compute structural similarity index @
    https://en.wikipedia.org/wiki/Structural_similarity.
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  area=0.0;
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,reconstruct_image,rows,1) reduction(+:area)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    double
      channel_distortion[MaxPixelChannels+1];  /* per-row accumulator */

    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      i,
      x;

    if (status == MagickFalse)
      continue;
    /* fetch a region wide/tall enough to center the Gaussian kernel on
       every pixel of this row */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) kernel_info->width/2L),y-
      ((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,-((ssize_t) kernel_info->width/
      2L),y-((ssize_t) kernel_info->height/2L),columns+kernel_info->width,
      kernel_info->height,exception);
    if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memset(channel_distortion,0,sizeof(channel_distortion));
    for (x=0; x < (ssize_t) columns; x++)
    {
      double
        x_pixel_mu[MaxPixelChannels+1],
        x_pixel_sigma_squared[MaxPixelChannels+1],
        xy_sigma[MaxPixelChannels+1],
        y_pixel_mu[MaxPixelChannels+1],
        y_pixel_sigma_squared[MaxPixelChannels+1];

      const Quantum
        *magick_restrict reference,
        *magick_restrict target;

      MagickRealType
        *k;

      ssize_t
        v;

      /* skip pixels masked out of either image */
      if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
          (GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      (void) memset(x_pixel_mu,0,sizeof(x_pixel_mu));
      (void) memset(x_pixel_sigma_squared,0,sizeof(x_pixel_sigma_squared));
      (void) memset(xy_sigma,0,sizeof(xy_sigma));
      (void) memset(y_pixel_mu,0,sizeof(y_pixel_mu));
      (void) memset(y_pixel_sigma_squared,0,sizeof(y_pixel_sigma_squared));
      k=kernel_info->values;
      reference=p;
      target=q;
      /* accumulate kernel-weighted first and second moments */
      for (v=0; v < (ssize_t) kernel_info->height; v++)
      {
        ssize_t
          u;

        for (u=0; u < (ssize_t) kernel_info->width; u++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            double
              x_pixel,
              y_pixel;

            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            x_pixel=QuantumScale*reference[i];
            x_pixel_mu[i]+=(*k)*x_pixel;
            x_pixel_sigma_squared[i]+=(*k)*x_pixel*x_pixel;
            y_pixel=QuantumScale*
              GetPixelChannel(reconstruct_image,channel,target);
            y_pixel_mu[i]+=(*k)*y_pixel;
            y_pixel_sigma_squared[i]+=(*k)*y_pixel*y_pixel;
            xy_sigma[i]+=(*k)*x_pixel*y_pixel;
          }
          k++;
          reference+=GetPixelChannels(image);
          target+=GetPixelChannels(reconstruct_image);
        }
        reference+=GetPixelChannels(image)*columns;
        target+=GetPixelChannels(reconstruct_image)*columns;
      }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          ssim,
          x_pixel_mu_squared,
          x_pixel_sigmas_squared,
          xy_mu,
          xy_sigmas,
          y_pixel_mu_squared,
          y_pixel_sigmas_squared;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(
          reconstruct_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        x_pixel_mu_squared=x_pixel_mu[i]*x_pixel_mu[i];
        y_pixel_mu_squared=y_pixel_mu[i]*y_pixel_mu[i];
        xy_mu=x_pixel_mu[i]*y_pixel_mu[i];
        xy_sigmas=xy_sigma[i]-xy_mu;
        x_pixel_sigmas_squared=x_pixel_sigma_squared[i]-x_pixel_mu_squared;
        y_pixel_sigmas_squared=y_pixel_sigma_squared[i]-y_pixel_mu_squared;
        /*
          SSIM(x,y)=((2*mu_x*mu_y+c1)*(2*sigma_xy+c2))/
            ((mu_x^2+mu_y^2+c1)*(sigma_x^2+sigma_y^2+c2)).
        */
        ssim=((2.0*xy_mu+c1)*(2.0*xy_sigmas+c2))/
          ((x_pixel_mu_squared+y_pixel_mu_squared+c1)*
           (x_pixel_sigmas_squared+y_pixel_sigmas_squared+c2));
        channel_distortion[i]+=ssim;
        channel_distortion[CompositePixelChannel]+=ssim;
      }
      area++;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    for (i=0; i <= MaxPixelChannels; i++)
      distortion[i]+=channel_distortion[i];
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    distortion[i]/=area;  /* average SSIM over compared pixels */
  }
  distortion[CompositePixelChannel]/=area;
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
/*
  GetStructuralDisimilarityDistortion() derives the structural dissimilarity
  (DSSIM) from the structural similarity index: (1-SSIM)/2.
*/
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  ssize_t
    channel;

  status=GetStructuralSimilarityDistortion(image,reconstruct_image,distortion,
    exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=(1.0-(distortion[channel]))/2.0;
  return(status);
}
/*
  GetImageDistortion() compares image against reconstruct_image with the
  requested metric, stores the composite-channel result in *distortion, and
  records it in the image's "distortion" property.  Returns MagickTrue on
  success.
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *metrics;

  MagickBooleanType
    status;

  size_t
    count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Allocate one slot per pixel channel plus the composite channel, then
    dispatch on the requested metric.
  */
  count=MaxPixelChannels+1UL;
  metrics=(double *) AcquireQuantumMemory(count,sizeof(*metrics));
  if (metrics == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(metrics,0,count*sizeof(*metrics));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,metrics,exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,metrics,exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,metrics,
        exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,metrics,exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,metrics,
        exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      /* NCC doubles as the default metric */
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        metrics,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,metrics,
        exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,metrics,
        exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,metrics,
        exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,metrics,
        exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        metrics,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        metrics,exception);
      break;
    }
  }
  *distortion=metrics[CompositePixelChannel];
  metrics=(double *) RelinquishMagickMemory(metrics);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Get image distortion for each channel.  The caller owns the returned
    array (MaxPixelChannels+1 doubles, composite value last) and must free
    it with RelinquishMagickMemory().
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      /*
        Bug fix: this case previously dispatched to
        GetRootMeanSquaredDistortion(); it must use the perceptual-hash
        metric, matching the single-value GetImageDistortion() dispatch.
      */
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  /* on failure the partially-filled array is released and NULL returned */
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compares the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /*
    Scan the union of both geometries; virtual views supply pixel values for
    coordinates outside an image's own extent.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        /* only channels defined in both images and flagged for update in
           the reconstruct image participate in the comparison */
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        if (distance >= MagickEpsilon)
          break;  /* first differing channel: stop comparing this pixel */
      }
      if (i < (ssize_t) GetPixelChannels(image))
        break;  /* channel loop broke early => mismatch, leave column loop */
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    if (x < (ssize_t) columns)
      break;  /* column loop broke early => mismatch, leave row loop */
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* images are equal iff every row completed without an early break */
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A value other than 0 means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  /*
    Scan the union of both extents; virtual views supply pixels outside an
    image's own geometry.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        /* only channels present in both images and updatable in the
           reconstruct image are measured */
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs((double) (p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q)));
        /* differences below MagickEpsilon count as an exact match, but the
           channel still contributes to area (the sample count) */
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  /* NOTE(review): if no channel was comparable, area stays 0.0 and the
     divisions below yield NaN/Inf -- confirm callers guarantee at least one
     updatable channel. */
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  /* MagickTrue means every measured channel matched within MagickEpsilon */
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image to the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Crop the candidate region of `image' at (x_offset,y_offset) with the
  geometry of `reference' and measure its distortion against the reference
  tile.  Returns 0.0 when the crop or the measurement fails.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    metric_value;

  Image
    *candidate;

  RectangleInfo
    region;

  SetGeometry(reference,&region);
  region.x=x_offset;
  region.y=y_offset;
  candidate=CropImage(image,&region,exception);
  if (candidate == (Image *) NULL)
    return(0.0);
  metric_value=0.0;
  if (GetImageDistortion(candidate,reference,metric,&metric_value,
      exception) == MagickFalse)
    metric_value=0.0;
  candidate=DestroyImage(candidate);
  return(metric_value);
}
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag  "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  /*
    The similarity image holds one pixel per candidate offset.
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    shared(progress,status,similarity_metric) \
    magick_number_threads(image,image,image->rows-reference->rows+1,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;  /* another thread already found a good-enough match */
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /*
        Correlation-style metrics report 1.0 as a perfect match; convert to a
        distance so that smaller is always better.  This touches only the
        thread-local `similarity' and needs no synchronization.
      */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
      /*
        Bug fix: the critical section previously guarded only the metric
        conversion above; the read-compare-update of the shared
        *similarity_metric and *offset was left unsynchronized, a data race
        under OpenMP.  The critical section now covers the shared update.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          offset->x=x;
          offset->y=y;
          *similarity_metric=similarity;
        }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      /*
        Paint the candidate pixel: white for a perfect match, black for none.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SimilarityImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
GB_binop__isle_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__isle_uint16
// A.*B function (eWiseMult): GB_AemultB__isle_uint16
// A*D function (colscale): GB_AxD__isle_uint16
// D*A function (rowscale): GB_DxB__isle_uint16
// C+=B function (dense accum): GB_Cdense_accumB__isle_uint16
// C+=b function (dense accum): GB_Cdense_accumb__isle_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isle_uint16
// C=scalar+B GB_bind1st__isle_uint16
// C=scalar+B' GB_bind1st_tran__isle_uint16
// C=A+scalar GB_bind2nd__isle_uint16
// C=A'+scalar GB_bind2nd_tran__isle_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x <= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_UINT16 || GxB_NO_ISLE_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator.  The loop body is
// supplied by the shared template, with GB_BINOP expanding to z = (x <= y).
GrB_Info GB_Cdense_ewise3_noaccum__isle_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    // this operator/type combination was compiled out; the caller falls back
    // to the generic kernel
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, with the work
// partitioned into tasks by the slice arrays (see GB_ek_slice.h).
GrB_Info GB_Cdense_accumB__isle_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,    // task partition arrays
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C.
GrB_Info GB_Cdense_accumb__isle_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,     // untyped pointer to the scalar b
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable: the block above always returns (artifact of generation)
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB_AxD__isle_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,    // task partition arrays
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C->x is filled in place by the colscale template
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__isle_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C->x is filled in place by the rowscale template
    uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, where "+" is the ISLE operator; the
// pattern of C is the set union of A and B.
GrB_Info GB_AaddB__isle_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,     // if true, use the structure of M only
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,        // if true, C->h is a copy of M->h
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, where ".*" is the ISLE operator; the
// pattern of C is the set intersection of A and B.
GrB_Info GB_AemultB__isle_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,     // if true, use the structure of M only
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [k] = (x <= Bx [k]) for all k: apply the ISLE operator with the scalar
// bound to the first argument.
GrB_Info GB_bind1st__isle_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    uint16_t *Cx = (uint16_t *) Cx_output ;
    const uint16_t *Bx = (const uint16_t *) Bx_input ;
    const uint16_t x = (*((const uint16_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (x <= Bx [k]) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [k] = (Ax [k] <= y) for all k: apply the ISLE operator with the scalar
// bound to the second argument.
GrB_Info GB_bind2nd__isle_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped inputs
    uint16_t *Cx = (uint16_t *) Cx_output ;
    const uint16_t *Ax = (const uint16_t *) Ax_input ;
    const uint16_t y = (*((const uint16_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        Cx [k] = (Ax [k] <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = (x <= aij), with the
// scalar x bound to the first argument (no typecasting despite the name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = Ax [pA] ;  \
    Cx [pC] = (x <= aij) ;      \
}

// C = op (x, A'): transpose A while applying the bound-first-scalar operator.
GrB_Info GB_bind1st_tran__isle_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,     // untyped pointer to the scalar x
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent code (generated-file convention)
    #undef GB_ATYPE
    #define GB_ATYPE \
    uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
// Redefine GB_CAST_OP for the transpose template: cij = (aij <= y), with the
// scalar y bound to the second argument (no typecasting despite the name).
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint16_t aij = Ax [pA] ;  \
    Cx [pC] = (aij <= y) ;      \
}

// C = op (A', y): transpose A while applying the bound-second-scalar operator.
GrB_Info GB_bind2nd_tran__isle_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // untyped pointer to the scalar y
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
csr_matmultivec.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "csr_multimatvec.h"
#include "seq_mv.h"
#include "seq_multivector.h"
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatMultivec
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixMatMultivec(HYPRE_Complex alpha, hypre_CSRMatrix *A,
                           hypre_Multivector *x, HYPRE_Complex beta,
                           hypre_Multivector *y)
{
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
   HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex *x_data = hypre_MultivectorData(x);
   HYPRE_Complex *y_data = hypre_MultivectorData(y);
   HYPRE_Int x_size = hypre_MultivectorSize(x);
   HYPRE_Int y_size = hypre_MultivectorSize(y);
   HYPRE_Int num_vectors = hypre_MultivectorNumVectors(x);
   HYPRE_Int *x_active_ind = x->active_indices;
   HYPRE_Int *y_active_ind = y->active_indices;
   HYPRE_Int num_active_vectors = x->num_active_vectors;
   HYPRE_Int i, j, jj, ierr = 0, optimize;
   HYPRE_Complex temp, *xptr;

   /*---------------------------------------------------------------------
    * Check for size compatibility.  Matvec returns ierr = 1 if
    * length of X doesn't equal the number of columns of A,
    * ierr = 2 if the length of Y doesn't equal the number of rows
    * of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in Matvec, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/
   hypre_assert(num_active_vectors == y->num_active_vectors);
   if (num_cols != x_size) ierr = 1;
   if (num_rows != y_size) ierr = 2;
   if (num_cols != x_size && num_rows != y_size) ierr = 3;

   /* fast path: every column of x and y is active */
   optimize = 0;
   if (num_active_vectors == num_vectors && num_vectors == y->num_vectors)
      optimize = 1;

   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows*num_vectors; i++) y_data[i] *= beta;
      return ierr;
   }

   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y  (the final scaling by alpha restores beta*y)
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows*num_vectors; i++) y_data[i] = 0.0;
      }
      else
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_rows*num_vectors; i++) y_data[i] *= temp;
      }
   }

   /*-----------------------------------------------------------------
    * y += A*x
    *-----------------------------------------------------------------*/
   if ( num_vectors==1 )
   {
      for (i = 0; i < num_rows; i++)
      {
         temp = y_data[i];
         for (jj = A_i[i]; jj < A_i[i+1]; jj++)
            temp += A_data[jj] * x_data[A_j[jj]];
         y_data[i] = temp;
      }
   }
   else
   {
      if (optimize == 0)
      {
         /* only the active columns of x/y participate */
         for (i = 0; i < num_rows; i++)
         {
            for (j = 0; j < num_active_vectors; ++j)
            {
               /* bug fix: xptr previously assigned the VALUE
                  x_data[...] to a pointer; it must take the ADDRESS of
                  the column's first entry */
               xptr = &x_data[x_active_ind[j]*x_size];
               temp = y_data[y_active_ind[j]*y_size+i];
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  temp += A_data[jj] * xptr[A_j[jj]];
               y_data[y_active_ind[j]*y_size+i] = temp;
            }
         }
      }
      else
      {
         /* all columns active: contiguous column addressing */
         for (i = 0; i < num_rows; i++)
         {
            for (j = 0; j < num_vectors; ++j)
            {
               /* bug fix: take the address of the column (see above) */
               xptr = &x_data[j*x_size];
               temp = y_data[j*y_size+i];
               for (jj = A_i[i]; jj < A_i[i+1]; jj++)
                  temp += A_data[jj] * xptr[A_j[jj]];
               y_data[j*y_size+i] = temp;
            }
         }
      }
   }

   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_rows*num_vectors; i++)
         y_data[i] *= alpha;
   }
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatMultivecT
*
* Performs y <- alpha * A^T * x + beta * y
*
* From Van Henson's modification of hypre_CSRMatrixMatvec.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixMatMultivecT(HYPRE_Complex alpha, hypre_CSRMatrix *A,
                            hypre_Multivector *x, HYPRE_Complex beta,
                            hypre_Multivector *y)
{
   HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
   HYPRE_Int *A_i = hypre_CSRMatrixI(A);
   HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
   HYPRE_Int num_cols = hypre_CSRMatrixNumCols(A);
   HYPRE_Complex *x_data = hypre_MultivectorData(x);
   HYPRE_Complex *y_data = hypre_MultivectorData(y);
   HYPRE_Int x_size = hypre_MultivectorSize(x);
   HYPRE_Int y_size = hypre_MultivectorSize(y);
   HYPRE_Int num_vectors = hypre_MultivectorNumVectors(x);
   HYPRE_Int num_active_vectors = x->num_active_vectors;
   HYPRE_Complex temp;
   HYPRE_Int i, jv, jj, ierr = 0;

   /*---------------------------------------------------------------------
    * Check for size compatibility.  MatvecT returns ierr = 1 if
    * length of X doesn't equal the number of rows of A,
    * ierr = 2 if the length of Y doesn't equal the number of
    * columns of A, and ierr = 3 if both are true.
    *
    * Because temporary vectors are often used in MatvecT, none of
    * these conditions terminates processing, and the ierr flag
    * is informational only.
    *--------------------------------------------------------------------*/
   hypre_assert(num_active_vectors == y->num_active_vectors);
   if (num_rows != x_size) ierr = 1;
   if (num_cols != y_size) ierr = 2;
   if (num_rows != x_size && num_cols != y_size) ierr = 3;

   /*-----------------------------------------------------------------------
    * Do (alpha == 0.0) computation - RDF: USE MACHINE EPS
    *-----------------------------------------------------------------------*/
   if (alpha == 0.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols*num_vectors; i++) y_data[i] *= beta;
      return ierr;
   }

   /*-----------------------------------------------------------------------
    * y = (beta/alpha)*y  (the final scaling by alpha restores beta*y)
    *-----------------------------------------------------------------------*/
   temp = beta / alpha;
   if (temp != 1.0)
   {
      if (temp == 0.0)
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_cols*num_vectors; i++) y_data[i] = 0.0;
      }
      else
      {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i = 0; i < num_cols*num_vectors; i++) y_data[i] *= temp;
      }
   }

   /*-----------------------------------------------------------------
    * y += A^T*x  (scatter each row of A into the columns of y)
    *-----------------------------------------------------------------*/
   if ( num_vectors==1 )
   {
      for (i = 0; i < num_rows; i++)
      {
         for (jj = A_i[i]; jj < A_i[i+1]; jj++)
            y_data[A_j[jj]] += A_data[jj] * x_data[i];
      }
   }
   else
   {
      for (jv = 0; jv < num_vectors; ++jv)
      {
         /* bug fix: the row loop was missing, so `i' was used
            uninitialized; every row of A must be scattered per vector */
         for (i = 0; i < num_rows; i++)
         {
            for (jj = A_i[i]; jj < A_i[i+1]; jj++)
               y_data[A_j[jj]+jv*y_size] += A_data[jj] * x_data[i+jv*x_size];
         }
      }
   }

   /*-----------------------------------------------------------------
    * y = alpha*y
    *-----------------------------------------------------------------*/
   if (alpha != 1.0)
   {
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < num_cols*num_vectors; i++)
         y_data[i] *= alpha;
   }
   return ierr;
}
|
GB_binop__bset_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bset_int8
// A.*B function (eWiseMult): GB_AemultB__bset_int8
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bset_int8
// C+=b function (dense accum): GB_Cdense_accumb__bset_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bset_int8
// C=scalar+B GB_bind1st__bset_int8
// C=scalar+B' GB_bind1st_tran__bset_int8
// C=A+scalar GB_bind2nd__bset_int8
// C=A'+scalar GB_bind2nd_tran__bset_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = GB_BITSET (aij, bij, int8_t, 8)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_BITSET (x, y, int8_t, 8) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_INT8 || GxB_NO_BSET_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bset_int8
(
    GrB_Matrix C,           // result, all three matrices dense
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    // C = A+B where C, A, and B are all dense; the entrywise work
    // (z = GB_BITSET (x, y, int8_t, 8)) is done by the included template.
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bset_int8
(
    GrB_Matrix C,           // dense output, accumulated in place
    const GrB_Matrix B,     // sparse input accumulated into C
    const int64_t *GB_RESTRICT kfirst_slice,    // task slicing of B
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    // C += B with the bitset operator; the loop body lives in the template.
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bset_int8
(
    GrB_Matrix C,               // dense output, accumulated in place
    const GB_void *p_bwork,     // points to the int8_t scalar b
    const int nthreads
)
{
    // C += b for a scalar b, with the bitset operator applied entrywise.
    #if GB_DISABLE
    // this operator/type combination was disabled at compile time
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // (an unreachable duplicate "return (GrB_SUCCESS) ;" that followed the
    // block above has been removed; the block always returns)
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__bset_int8
(
    GrB_Matrix C,                       // output of the eWiseAdd
    const GrB_Matrix M,                 // optional mask (C<M> = A+B)
    const bool Mask_struct,             // mask handling flag passed to template
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,                // hyperlist relationship, see GB_add
    const int64_t *GB_RESTRICT C_to_M,  // mappings from C's vectors
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    // eWiseAdd: C = A+B (union of patterns), op applied where both present.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bset_int8
(
    GrB_Matrix C,                       // output of the eWiseMult
    const GrB_Matrix M,                 // optional mask (C<M> = A.*B)
    const bool Mask_struct,             // mask handling flag passed to template
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,  // mappings from C's vectors
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    // eWiseMult: C = A.*B (intersection of patterns).
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bset_int8
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = bitset (x, Bx [p]) for all p, with x bound as the first
    // argument of the binary operator.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped array arguments
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Bx = (int8_t *) Bx_input ;
    // the scalar bound to the operator's first argument
    int8_t x = (*((int8_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // load into a local so the macro does not re-read Bx [p]
        int8_t yval = Bx [p] ;
        Cx [p] = GB_BITSET (x, yval, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bset_int8
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    // Cx [p] = bitset (Ax [p], y) for all p, with y bound as the second
    // argument of the binary operator.
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped array arguments
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    // the scalar bound to the operator's second argument
    int8_t y = (*((int8_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // load into a local so the macro does not re-read Ax [p]
        int8_t xval = Ax [p] ;
        Cx [p] = GB_BITSET (xval, y, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (x, aij, int8_t, 8) ; \
}
GrB_Info GB_bind1st_tran__bset_int8
(
    GrB_Matrix C,                       // C = op (x, A')
    const GB_void *x_input,             // scalar bound to the 1st argument
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // transpose workspace
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the scalar x, applied via the redefined GB_CAST_OP just above
    int8_t x = (*((const int8_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code following this function
    #undef GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITSET (aij, y, int8_t, 8) ; \
}
GrB_Info GB_bind2nd_tran__bset_int8
(
    GrB_Matrix C,                       // C = op (A', y)
    const GrB_Matrix A,
    const GB_void *y_input,             // scalar bound to the 2nd argument
    int64_t *GB_RESTRICT *Rowcounts,    // transpose workspace
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the scalar y, applied via the redefined GB_CAST_OP just above
    int8_t y = (*((const int8_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
mode_op.h | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <iostream>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/transpose_op.h"
namespace paddle {
namespace operators {
template <typename T, typename Type>
static void getMode(Type input_height, Type input_width, int input_dim,
                    const framework::Tensor* input, T* t_out, Type* t_indices) {
  // For each of the input_height rows, compute the mode (most frequent value)
  // of its input_width entries; write the value to t_out[i] and the original
  // column index of one occurrence to t_indices[i].
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    // gather (value, original column) pairs for this row
    std::vector<std::pair<T, Type>> col_vec;
    col_vec.reserve(input_width);
    if (input_dim == 1) {
      auto e_input = framework::EigenVector<T>::Flatten(*input);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(e_input(j), j);
      }
    } else {
      auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(e_input(i, j), j);
      }
    }
    // sort ascending with NaNs ordered after non-NaNs, so equal values
    // become adjacent runs
    std::sort(col_vec.begin(), col_vec.end(),
              [](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
                return (!std::isnan(static_cast<double>(l.first)) &&
                        std::isnan(static_cast<double>(r.first))) ||
                       (l.first < r.first);
              });
    T mode = 0;
    int64_t indice = 0;
    int64_t cur_freq = 0;
    int64_t max_freq = 0;
    // Scan runs of equal values and keep the longest one.
    // BUG-PRONE FIX: loop variable renamed from `i` to `t`; it used to
    // shadow the outer row index `i` (behavior was unchanged, but the
    // shadowing was misleading and flagged by static analysis).
    for (int64_t t = 0; t < input_width; ++t) {
      ++cur_freq;
      if (t == input_width - 1 || (col_vec[t + 1].first != col_vec[t].first)) {
        if (cur_freq > max_freq) {
          max_freq = cur_freq;
          mode = col_vec[t].first;
          indice = col_vec[t].second;
        }
        cur_freq = 0;
      }
    }
    t_out[i] = mode;
    t_indices[i] = indice;
  }
}
template <typename T, typename Type>
// Scatter kernel used by the mode gradient: for each row i, copy the single
// (grad) value of that row into output_data at the column recorded in
// `indices`, leaving all other entries of output_data untouched.
static void ModeAssign(const Type& input_height, const Type& input_width,
                       const int& input_dim, const framework::Tensor* input,
                       const framework::Tensor* indices, T* output_data) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    if (input_dim == 1) {
      // 1-D case: one value, one index
      auto e_input = framework::EigenVector<T>::Flatten(*input);
      auto e_indices = framework::EigenVector<Type>::Flatten(*indices);
      output_data[i * input_width + e_indices(0)] = e_input(0);
    } else {
      // N-D case flattened to (input_height, 1): row i holds one value/index
      auto e_input = framework::EigenMatrix<T>::Reshape(*input, input_dim - 1);
      auto e_indices =
          framework::EigenMatrix<Type>::Reshape(*indices, input_dim - 1);
      output_data[i * input_width + e_indices(i, 0)] = e_input(i, 0);
    }
  }
}
template <typename DeviceContext, typename T>
// CPU kernel for the "mode" op: finds the most frequent value of X along
// `axis`, producing "Out" (values) and "Indices" (positions along the axis).
class ModeCPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* input = context.Input<framework::Tensor>("X");
    auto* output = context.Output<framework::Tensor>("Out");
    auto* indices = context.Output<framework::Tensor>("Indices");
    const auto& in_dims = input->dims();
    bool keepdim = static_cast<bool>(context.Attr<bool>("keepdim"));

    // axis < 0, calculate the real axis
    int axis = static_cast<int>(context.Attr<int>("axis"));
    if (axis < 0) axis += in_dims.size();

    T* output_data = output->mutable_data<T>(context.GetPlace());
    int64_t* indices_data = indices->mutable_data<int64_t>(context.GetPlace());
    // saved so the outputs can be restored after the temporary Resize below
    auto out_dims = output->dims();
    // if axis is not the last dim, transpose it to the last dim, do the
    // calculation, then transpose it back to the original axis.
    if (axis == in_dims.size() - 1) {
      // fast path: treat X as (input_height, input_width) and reduce rows
      const int64_t& input_height =
          phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t& input_width = in_dims[in_dims.size() - 1];
      getMode<T, int64_t>(input_height, input_width, in_dims.size(), input,
                          output_data, indices_data);
    } else {
      // permutation that moves `axis` to the last position
      std::vector<int> trans_axis;
      for (int i = 0; i < axis; i++) {
        trans_axis.emplace_back(i);
      }
      trans_axis.push_back(in_dims.size() - 1);
      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
        trans_axis.emplace_back(i);
      }
      trans_axis.emplace_back(axis);

      if (!keepdim) {
        // temporarily give the outputs a keepdim-style shape (axis size 1)
        // so the transposed assignment below lines up; restored at the end
        std::vector<int> tmp_out_shape;
        for (int i = 0; i < axis; i++) {
          tmp_out_shape.emplace_back(in_dims[i]);
        }
        tmp_out_shape.emplace_back(1);
        for (int i = axis + 1; i < in_dims.size(); i++) {
          tmp_out_shape.emplace_back(in_dims[i]);
        }
        framework::DDim tmp_out_dim = phi::make_ddim(tmp_out_shape);
        output->Resize(tmp_out_dim);
        indices->Resize(tmp_out_dim);
      }

      // get the trans input_dims, out_dims
      framework::DDim trans_shape(in_dims);
      framework::DDim trans_out_shape(in_dims);
      for (size_t i = 0; i < trans_axis.size(); i++) {
        trans_shape[i] = in_dims[trans_axis[i]];
        trans_out_shape[i] = in_dims[trans_axis[i]];
      }
      trans_out_shape[in_dims.size() - 1] = 1;

      framework::Tensor trans_input;
      trans_input.mutable_data<T>(trans_shape, context.GetPlace());
      int ndims = trans_axis.size();
      auto& dev_context =
          context.template device_context<platform::CPUDeviceContext>();
      // transpose the input value
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, *input,
                                                  &trans_input, trans_axis);
      const int64_t input_height =
          phi::product(phi::slice_ddim(trans_shape, 0, trans_shape.size() - 1));
      const int64_t input_width = trans_shape[trans_shape.size() - 1];
      // compute the mode on the transposed layout into temporaries
      framework::Tensor tmp_out;
      T* t_out = tmp_out.mutable_data<T>(trans_out_shape, context.GetPlace());
      framework::Tensor tmp_indices;
      auto* t_ind = tmp_indices.mutable_data<int64_t>(trans_out_shape,
                                                      context.GetPlace());
      getMode<T, int64_t>(input_height, input_width, in_dims.size(),
                          &trans_input, t_out, t_ind);
      // transpose back
      TransCompute<platform::CPUDeviceContext, int64_t>(
          ndims, dev_context, tmp_indices, indices, trans_axis);
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
                                                  output, trans_axis);
      if (!keepdim) {
        // restore the caller-visible (squeezed) output shapes
        output->Resize(out_dims);
        indices->Resize(out_dims);
      }
    }
  }
};
template <typename DeviceContext, typename T>
// CPU gradient kernel for the "mode" op: routes each element of Out@GRAD back
// to the position (recorded in "Indices") it was taken from in X; all other
// entries of X@GRAD are zero.
class ModeGradCPUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* x = context.Input<framework::Tensor>("X");
    auto* out_grad =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* indices = context.Input<framework::Tensor>("Indices");
    auto* x_grad =
        context.Output<framework::Tensor>(framework::GradVarName("X"));
    int axis = static_cast<int>(context.Attr<int>("axis"));
    bool keepdim = static_cast<bool>(context.Attr<bool>("keepdim"));

    auto in_dims = x->dims();
    auto out_dims = indices->dims();

    // axis < 0, get the real axis
    axis = (axis < 0) ? (in_dims.size() + axis) : axis;

    if (!keepdim) {
      // rebuild the keepdim-style shape (axis size 1) so the grad tensors
      // can be reshaped to line up with the forward computation
      std::vector<int> tmp_out_shape;
      for (int i = 0; i < axis; i++) {
        tmp_out_shape.emplace_back(out_dims[i]);
      }
      tmp_out_shape.emplace_back(1);
      for (int i = axis + 1; i < in_dims.size(); i++) {
        tmp_out_shape.emplace_back(out_dims[i - 1]);
      }
      out_dims = phi::make_ddim(tmp_out_shape);
    }
    T* x_grad_data = x_grad->mutable_data<T>(context.GetPlace());
    if (axis == in_dims.size() - 1) {
      // allocate the memory for the input_grad
      // assign the out_grad to input_grad directly
      const int64_t input_height =
          phi::product(phi::slice_ddim(in_dims, 0, in_dims.size() - 1));
      const int64_t input_width = in_dims[in_dims.size() - 1];

      // init the output grad with 0, because some input elements have no grad
      memset(x_grad_data, 0, x_grad->numel() * sizeof(T));
      // Assign the output_grad to input_grad
      if (keepdim) {
        ModeAssign(input_height, input_width, in_dims.size(), out_grad, indices,
                   x_grad_data);
      } else {
        // copy grads/indices so they can be resized to the keepdim shape
        // without touching the caller's tensors
        auto& dev_context =
            context.template device_context<platform::CPUDeviceContext>();
        framework::Tensor out_grad_tmp;
        framework::Tensor indices_tmp;
        out_grad_tmp.mutable_data<T>(out_grad->dims(), dev_context.GetPlace());
        indices_tmp.mutable_data<int64_t>(indices->dims(),
                                          dev_context.GetPlace());
        framework::TensorCopy(*out_grad, dev_context.GetPlace(), dev_context,
                              &out_grad_tmp);
        framework::TensorCopy(*indices, dev_context.GetPlace(), dev_context,
                              &indices_tmp);
        out_grad_tmp.Resize(out_dims);
        indices_tmp.Resize(out_dims);
        ModeAssign(input_height, input_width, in_dims.size(), &out_grad_tmp,
                   &indices_tmp, x_grad_data);
      }
    } else {
      // can not assign grad to input_grad, must do the transpose
      std::vector<int> trans_axis;
      for (int i = 0; i < axis; i++) {
        trans_axis.emplace_back(i);
      }
      trans_axis.emplace_back(out_dims.size() - 1);
      for (int i = axis + 1; i < out_dims.size() - 1; i++) {
        trans_axis.emplace_back(i);
      }
      trans_axis.emplace_back(axis);
      framework::DDim trans_shape(out_dims);
      framework::DDim trans_in_shape(in_dims);
      for (size_t i = 0; i < trans_axis.size(); i++) {
        trans_shape[i] = out_dims[trans_axis[i]];
        trans_in_shape[i] = in_dims[trans_axis[i]];
      }
      // transpose the out_grad, indices
      framework::Tensor trans_dO;
      trans_dO.mutable_data<T>(trans_shape, context.GetPlace());
      framework::Tensor trans_ind;
      trans_ind.mutable_data<int64_t>(trans_shape, context.GetPlace());
      int ndims = trans_axis.size();
      auto& dev_context =
          context.template device_context<platform::CPUDeviceContext>();

      if (keepdim) {
        // Do transpose
        TransCompute<platform::CPUDeviceContext, T>(
            ndims, dev_context, *out_grad, &trans_dO, trans_axis);
        TransCompute<platform::CPUDeviceContext, int64_t>(
            ndims, dev_context, *indices, &trans_ind, trans_axis);
      } else {
        // reshape copies to the keepdim shape first, then transpose
        framework::Tensor out_grad_tmp;
        framework::Tensor indices_tmp;
        out_grad_tmp.mutable_data<T>(out_grad->dims(), dev_context.GetPlace());
        indices_tmp.mutable_data<int64_t>(indices->dims(),
                                          dev_context.GetPlace());
        framework::TensorCopy(*out_grad, dev_context.GetPlace(), dev_context,
                              &out_grad_tmp);
        framework::TensorCopy(*indices, dev_context.GetPlace(), dev_context,
                              &indices_tmp);
        out_grad_tmp.Resize(out_dims);
        indices_tmp.Resize(out_dims);
        // Do transpose
        TransCompute<platform::CPUDeviceContext, T>(
            ndims, dev_context, out_grad_tmp, &trans_dO, trans_axis);
        TransCompute<platform::CPUDeviceContext, int64_t>(
            ndims, dev_context, indices_tmp, &trans_ind, trans_axis);
      }
      const int64_t input_height = phi::product(
          phi::slice_ddim(trans_in_shape, 0, trans_in_shape.size() - 1));
      const int64_t input_width = trans_in_shape[trans_in_shape.size() - 1];

      // Assign the out_grad to transposed input_grad
      framework::Tensor tmp_out;
      T* t_out = tmp_out.mutable_data<T>(trans_in_shape, context.GetPlace());
      // zero-fill: only the mode positions receive gradient
      memset(t_out, 0, x_grad->numel() * sizeof(T));

      ModeAssign<T, int64_t>(input_height, input_width, in_dims.size(),
                             &trans_dO, &trans_ind, t_out);

      // Transpose back
      TransCompute<platform::CPUDeviceContext, T>(ndims, dev_context, tmp_out,
                                                  x_grad, trans_axis);
    }
  }
};
} // namespace operators
} // namespace paddle
|
NBM.c | #define VERBOSE
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>
#include "leergml.h"
RED red;
int twom; // Va a ser el doble del número de aristas
// lo queremos para muchas cosas.
int **m; // El conteo de las aristas.
int arista_1, arista_2;
int kronecker(int x, int y){
if(x==y) return 1;
else return 0;
}
int main(int argc, char *argv[])
{
  /* Builds and prints the non-backtracking matrix (NBM) of the graph read
   * from stdin. The NBM has one row/column per DIRECTED edge; since twom is
   * the sum of degrees (= 2 * number of edges), the matrix is twom x twom. */
  int u, chunk, i, j, k, l;

#ifdef VERBOSE
  fprintf(stderr, "Leyendo la red...\n");
#endif
  leer_red(&red, stdin);

  /* twom = sum of all degrees = twice the number of (undirected) edges */
  for (u = twom = 0; u < red.nnodos; u++) twom += red.nodo[u].grado;
#ifdef VERBOSE
  fprintf(stderr, "Red con %i nodos y %i aristas\n",
          red.nnodos, twom / 2);
#endif
#ifdef VERBOSE
  fprintf(stderr, "\n");
#endif
#ifdef VERBOSE
  int **nbm;
  int renglon, columna;
  /* BUG FIX: was calloc(2*twom, sizeof(int)) — the row-pointer array must be
   * sized with sizeof(int *) (sizeof(int) under-allocates on 64-bit), and
   * only twom rows/columns are ever written or printed. */
  nbm = (int **)calloc(twom, sizeof(int *));
  for (renglon = 0; renglon < twom; renglon++) {
    nbm[renglon] = (int *)calloc(twom, sizeof(int));
  }

  /* offset[i] = index of the first directed edge leaving node i. This lets
   * each thread compute its row index directly, replacing the old shared
   * arista_1/arista_2 counters, which raced under OpenMP and produced wrong
   * indices with more than one thread. */
  int *offset = (int *)malloc(red.nnodos * sizeof(int));
  for (i = 0, u = 0; i < red.nnodos; i++) {
    offset[i] = u;
    u += red.nodo[i].grado;
  }

  chunk = 8;
#pragma omp parallel for schedule(static, chunk) shared(nbm, offset, chunk) private(i, j, k, l)
  for (i = 0; i < red.nnodos; i++) {
    for (j = 0; j < red.nodo[i].grado; j++) {
      int fila = offset[i] + j;   /* row: directed edge i -> arista[j] */
      int col = 0;                /* column: directed edge k -> arista[l] */
      for (k = 0; k < red.nnodos; k++) {
        for (l = 0; l < red.nodo[k].grado; l++) {
          /* NBM entry: 1 iff edge (i->j) feeds into edge (k->l) without
           * immediately backtracking to node i */
          nbm[fila][col] =
              kronecker(red.nodo[k].id, red.nodo[i].arista[j].entrada) *
              (1 - kronecker(red.nodo[i].id, red.nodo[k].arista[l].entrada));
          col++;
        }
      }
    }
  }

  for (renglon = 0; renglon < twom; renglon++) {
    for (columna = 0; columna < twom; columna++) {
      printf("%i ", nbm[renglon][columna]);
    }
    printf("\n");
  }

  /* release the matrix and the offset table */
  for (renglon = 0; renglon < twom; renglon++) free(nbm[renglon]);
  free(nbm);
  free(offset);
#endif
  return 0;
}
|
GaussianProcess.h | /**
* @file GaussianProcess.h
* @author Jan Nguyen
* @date 17.05.2019
*/
#pragma once
#include <Eigen/Core>
#include <utility>
#include "AcquisitionFunction.h"
#include "GaussianHyperparameters.h"
#include "autopas/options/AcquisitionFunctionOption.h"
#include "autopas/utils/ExceptionHandler.h"
#include "autopas/utils/Math.h"
#include "autopas/utils/NumberSet.h"
#include "autopas/utils/Random.h"
#include "autopas/utils/WrapOpenMP.h"
namespace autopas {
/**
* Gaussian process is a stochastical model. It predicts the
* output of a blackbox function f(x) for given input x. To do so, some sample
* input-output pairs (x,f(x)) should be provided as evidence.
*
* Currently the squared exponential kernel is used.
* TODO: maybe offer some options.
*/
class GaussianProcess {
friend GaussianHyperparameters;
using Vector = Eigen::VectorXd;
// number of samples to find optimal hyperparameters
static constexpr size_t hp_sample_size = 10000;
// number of hyperparameters
static constexpr size_t hp_size = 100;
public:
  /**
   * Constructor
   * @param dims number of input dimensions
   * @param sigma fixed noise
   * @param rngRef reference to rng
   */
  GaussianProcess(size_t dims, double sigma, Random &rngRef)
      : _inputs(),
        _outputs(),
        _dims(dims),
        _evidenceMinValue(0),
        _evidenceMaxValue(0),
        _sigma(sigma),
        _hypers(),
        _rng(rngRef) {
    // initialize the hyperparameter samples even before any evidence exists
    tuneHyperparameters();
  }
  /**
   * Change input dimension. Current evidence will be discarded.
   * @param dims new number of input dimensions
   */
  void setDimension(size_t dims) {
    _dims = dims;
    clear();
  }

  /**
   * Discard all evidence and retune the hyperparameters.
   */
  void clear() {
    // drop all inputs and shrink the output vector to size zero
    _inputs.clear();
    _outputs = Eigen::VectorXd::Zero(0);
    tuneHyperparameters();
  }

  /**
   * Get the number of evidence provided.
   * @return number of stored input-output pairs
   */
  [[nodiscard]] size_t numEvidence() const { return _inputs.size(); }

  /**
   * Get all currently stored evidence
   * @return pair of inputs and outputs
   */
  [[nodiscard]] std::pair<const std::vector<Vector> &, const Eigen::VectorXd &> getEvidence() const {
    return std::make_pair(std::cref(_inputs), std::cref(_outputs));
  }
  /**
   * Provide a input-output pair as evidence.
   * Each evidence improve the quality of future predictions.
   * @param input x
   * @param output f(x)
   * @param tuneHypers if false hyperparemeters need to be set manually
   */
  void addEvidence(const Vector &input, double output, bool tuneHypers) {
    if (static_cast<size_t>(input.size()) != _dims) {
      utils::ExceptionHandler::exception(
          "GaussianProcess.addEvidence: size of input {} does not match specified dimensions {}", input.size(), _dims);
    }

    if (_inputs.empty()) {
      // first evidence: initializes min, max, and argmax in one step
      _evidenceMinValue = _evidenceMaxValue = output;
      _evidenceMaxVector = input;
    } else if (output < _evidenceMinValue) {
      _evidenceMinValue = output;
    } else if (output > _evidenceMaxValue) {
      // track the input that produced the largest output so far
      _evidenceMaxValue = output;
      _evidenceMaxVector = input;
    }

    _inputs.push_back(input);
    long newSize = _inputs.size();

    // extend output vector, keeping previously stored outputs
    _outputs.conservativeResize(newSize, Eigen::NoChange_t());
    _outputs(newSize - 1) = output;

    if (tuneHypers) {
      tuneHyperparameters();
    } else {
      // hyperparameters should be recalculated
      _hypers.clear();
    }
  }
  /**
   * Get the evidence with the highest output value
   * @return input of max; throws if no evidence has been added
   */
  [[nodiscard]] const Vector &getEvidenceMax() const {
    if (_inputs.empty()) {
      utils::ExceptionHandler::exception("GaussianProcess has no evidence");
    }
    return _evidenceMaxVector;
  }
  /**
   * Try to predict f(x) using the evidence
   * provided so far.
   * @param input x
   * @return expected output of f(x), a score-weighted mix over all
   * hyperparameter samples
   */
  [[nodiscard]] double predictMean(const Vector &input) const {
    if (static_cast<size_t>(input.size()) != _dims) {
      utils::ExceptionHandler::exception(
          "GaussianProcess.predictMean: size of input {} does not match specified dimensions {}", input.size(), _dims);
    }

    double result = 0.;
    if (_inputs.empty()) {
      // no evidence: fall back to the weighted prior means
      for (const auto &hyper : _hypers) {
        result += hyper.score * hyper.mean;
      }
    } else {
      // weighted posterior mean: prior mean + kernel-weighted evidence
      for (const auto &hyper : _hypers) {
        result += hyper.score * (hyper.mean + kernelVector(input, hyper.theta, hyper.dimScales).dot(hyper.weights));
      }
    }

    return result;
  }

  /**
   * Get the variance if evidence are ignored.
   * @return score-weighted prior variance (theta) over all samples
   */
  [[nodiscard]] double getDefaultVar() const {
    double result = 0.;
    for (const auto &hyper : _hypers) {
      result += hyper.score * hyper.theta;
    }
    return result;
  }
  /**
   * The variance of the predicted f(x) from predictMean().
   * @param input x
   * @return variance
   */
  [[nodiscard]] double predictVar(const Vector &input) const {
    if (static_cast<size_t>(input.size()) != _dims) {
      utils::ExceptionHandler::exception(
          "GaussianProcess.predictVar: size of input {} does not match specified dimensions {}", input.size(), _dims);
    }

    double result = 0.;
    if (_inputs.empty()) {
      // no evidence
      return getDefaultVar();
    } else {
      // weighted posterior variance: k(x,x) minus explained variance
      for (const auto &hyper : _hypers) {
        auto kVec = kernelVector(input, hyper.theta, hyper.dimScales);
        result += hyper.score * (kernel(input, input, hyper.theta, hyper.dimScales) - kVec.dot(hyper.covMatInv * kVec));
      }
    }

    return result;
  }
  /**
   * Calculate the probability density of provided output given provided input.
   * NOTE(review): divides by the predicted stddev — assumes predictVar() is
   * strictly positive for any input; confirm callers cannot hit variance 0.
   * @param input
   * @param output
   * @return
   */
  [[nodiscard]] double predictOutputPDF(const Vector &input, double output) const {
    double stddev = std::sqrt(predictVar(input));
    double mean = predictMean(input);
    return utils::Math::normalPDF((mean - output) / stddev) / stddev;
  }

  /**
   * Calculate the scaled probability density of provided output given provided input.
   * The probability density is scaled such that the maximum is 1.
   * @param input
   * @param output
   * @return
   */
  [[nodiscard]] double predictOutputScaledPDF(const Vector &input, double output) const {
    double stddev = std::sqrt(predictVar(input));
    double mean = predictMean(input);
    return utils::Math::normalPDF((mean - output) / stddev) / utils::Math::normalScale;
  }

  /**
   * Calculates the acquisition function for given input.
   * @param af acquisition function a:input->double
   * @param input i
   * @return a(i). This value can be compared with values a(x) of other inputs x to weigh which input would give the
   * most gain if its evidence were provided.
   */
  [[nodiscard]] inline double calcAcquisition(AcquisitionFunctionOption af, const Vector &input) const {
    return AcquisitionFunction::calcAcquisition(af, predictMean(input), predictVar(input), _evidenceMaxValue);
  }
/**
* Find the input in samples which maximizes given aquisition function.
* TODO: maybe add parameters for hyperparameters of aquisition functions
* @param af function to maximize
* @param samples
* @return
*/
[[nodiscard]] Vector sampleAquisitionMax(AcquisitionFunctionOption af, const std::vector<Vector> &samples) const {
size_t bestIdx = 0;
double bestVal = calcAcquisition(af, samples[0]);
// find optimmum from samples
for (size_t i = 1; i < samples.size(); ++i) {
double val = calcAcquisition(af, samples[i]);
if (val > bestVal) {
bestIdx = i;
bestVal = val;
}
}
return samples[bestIdx];
}
  /**
   * Generate hyperparameter samples.
   * @param sampleSize size
   * @param rng random number generator
   * @param dims number of input dimension
   * @param sigma fixed noise
   * @param evidenceMinValue current lowest evidence output
   * @param evidenceMaxValue current highest evidence output
   * @return tuple [means, thetas, dimScales]
   */
  [[nodiscard]] static std::tuple<std::vector<double>, std::vector<double>, std::vector<Eigen::VectorXd>>
  generateHyperparameterSamples(size_t sampleSize, Random &rng, size_t dims, double sigma, double evidenceMinValue,
                                double evidenceMaxValue) {
    // range of mean
    // inside bounds of evidence outputs
    NumberInterval<double> meanRange(evidenceMinValue, evidenceMaxValue);
    // range of theta
    // max sample stddev: (max - min)
    // max stddev from zero: abs(min) & abs(max)
    double thetaMax = std::pow(
        std::max({evidenceMaxValue - evidenceMinValue, std::abs(evidenceMinValue), std::abs(evidenceMaxValue)}), 2);
    // at least sigma
    thetaMax = std::max(thetaMax, sigma);
    NumberInterval<double> thetaRange(sigma, thetaMax);
    // range of dimScale
    // Assuming most distances are greater equal 1.
    // For a dimScale d > 5 + ln(thetaMax): theta * exp(-d r) < 1%.
    // So choosing a greater dimScale may lead to many kernels close to zero.
    // But if needed the upper bound can be increased.
    NumberInterval<double> dimScaleRange(0., 5. + std::max(0., std::log(thetaMax)));

    // generate mean
    auto sample_means = meanRange.uniformSample(sampleSize, rng);

    // generate theta
    auto sample_thetas = thetaRange.uniformSample(sampleSize, rng);

    // generate dimScale: one uniform sample stream per input dimension
    std::vector<std::vector<double>> sample_dimScaleData;
    sample_dimScaleData.reserve(dims);
    for (size_t d = 0; d < dims; ++d) {
      sample_dimScaleData.emplace_back(dimScaleRange.uniformSample(sampleSize, rng));
    }
    // convert dimScales to Vectors (transpose: per-sample vector of dims)
    std::vector<Eigen::VectorXd> sample_dimScales;
    sample_dimScales.reserve(sampleSize);
    for (size_t t = 0; t < sampleSize; ++t) {
      std::vector<double> dimScaleData;
      dimScaleData.reserve(dims);
      for (size_t d = 0; d < dims; ++d) {
        dimScaleData.push_back(sample_dimScaleData[d][t]);
      }
      sample_dimScales.emplace_back(utils::Math::makeVectorXd(dimScaleData));
    }

    return std::make_tuple(sample_means, sample_thetas, sample_dimScales);
  }
/**
 * Get current hyperparameters.
 * @return mutable reference to the sampled hyperparameters, including their
 *   precalculated matrices and scores
 */
[[nodiscard]] std::vector<GaussianHyperparameters> &getHyperparameters() { return _hypers; }
/**
 * Set the hyperparameters: means, theta, dimScale.
 * Each sample is scored equal to the probability that the given evidence and the
 * hyperparameter-sample generate the given output. The resulting weights are not
 * yet normalized.
 * @param sample_means sampled mean per hyperparameter sample
 * @param sample_thetas sampled theta per hyperparameter sample
 * @param sample_dimScales sampled per-dimension scaling per hyperparameter sample
 */
void setHyperparameters(const std::vector<double> &sample_means, const std::vector<double> &sample_thetas,
                        const std::vector<Eigen::VectorXd> &sample_dimScales) {
  size_t hyperSize = sample_means.size();
  _hypers.clear();
  // initialize hyperparameter samples
  _hypers.reserve(hyperSize);
  for (size_t t = 0; t < hyperSize; ++t) {
    _hypers.emplace_back(sample_means[t], sample_thetas[t], sample_dimScales[t]);
  }
  // precalculate matrices for all hyperparameters
  // @TODO find sensible chunkSize
#ifdef AUTOPAS_OPENMP
  // distribute the expensive precalculations dynamically over all threads
  const size_t chunkSize = std::max(hyperSize / (autopas_get_num_threads() * 10), 1ul);
#pragma omp parallel for schedule(dynamic, chunkSize)
#endif
  for (size_t t = 0; t < hyperSize; ++t) {
    _hypers[t].precalculate(_sigma, _inputs, _outputs);
  }
}
/**
 * Normalize the weights of the hyperparameter samples and truncate the lowest ones.
 */
void normalizeHyperparameters() {
  // order samples by descending score so the best ones come first
  std::sort(_hypers.begin(), _hypers.end(),
            [](const GaussianHyperparameters &lhs, const GaussianHyperparameters &rhs) {
              return lhs.score > rhs.score;
            });
  // keep only the hp_size best-scoring samples
  if (_hypers.size() > hp_size) {
    _hypers.erase(_hypers.begin() + hp_size, _hypers.end());
  }
  // total weight of the remaining samples
  double totalScore = 0.;
  for (const auto &hyper : _hypers) {
    totalScore += hyper.score;
  }
  if (totalScore > 0) {
    // scale scores such that they sum up to one
    for (auto &hyper : _hypers) {
      hyper.score /= totalScore;
    }
  } else {
    // degenerate case: all scores are zero, fall back to a uniform distribution
    const double uniformWeight = 1. / _hypers.size();
    for (auto &hyper : _hypers) {
      hyper.score = uniformWeight;
    }
  }
}
private:
/**
 * Hyperparameter means, theta and dimScale are randomly generated.
 * The samples are combined using a weighted average. The weight of a sample
 * equals to the probability that given evidence and hyperparameter-sample
 * generates given output. The lowest weights are truncated.
 */
void tuneHyperparameters() {
  _hypers.clear();
  // without any evidence fall back to a single set of default hyperparameters
  if (_inputs.empty()) {
    _hypers.emplace_back(0., 1., Eigen::VectorXd::Ones(_dims));
    _hypers[0].precalculate(_sigma, _inputs, _outputs);
    _hypers[0].score = 1.;
    return;
  }
  // otherwise sample fresh hyperparameters, score them, then normalize and truncate
  auto [sample_means, sample_thetas, sample_dimScales] =
      generateHyperparameterSamples(hp_sample_size, _rng, _dims, _sigma, _evidenceMinValue, _evidenceMaxValue);
  setHyperparameters(sample_means, sample_thetas, sample_dimScales);
  normalizeHyperparameters();
}
/**
 * Kernel function describing the similarity of two inputs
 * under the given hyperparameters.
 * @param input1 first input
 * @param input2 second input
 * @param theta factor scaling the whole kernel
 * @param dimScale per-dimension scaling of the distance
 * @return similarity of input1 and input2
 */
[[nodiscard]] static inline double kernel(const Vector &input1, const Vector &input2, double theta,
                                          const Eigen::VectorXd &dimScale) {
  // accumulate the per-dimension scaled squared distance
  double scaledSqrDist = 0;
  for (int d = 0; d < input1.size(); ++d) {
    const double diff = input1[d] - input2[d];
    scaledSqrDist += diff * (diff * dimScale[d]);
  }
  return theta * std::exp(-scaledSqrDist);
}
/**
 * Calculates the kernel between the given input and every stored evidence input.
 * @param input input to compare against all evidence
 * @param theta kernel hyperparameter
 * @param dimScale kernel hyperparameter, per-dimension scaling
 * @return Vector of covariances, one entry per evidence input
 */
[[nodiscard]] Eigen::VectorXd kernelVector(const Vector &input, double theta, const Eigen::VectorXd &dimScale) const {
  std::vector<double> covariances;
  covariances.reserve(_inputs.size());
  for (const auto &evidence : _inputs) {
    covariances.push_back(kernel(input, evidence, theta, dimScale));
  }
  return utils::Math::makeVectorXd(covariances);
}
/**
 * Evidence inputs provided so far.
 */
std::vector<Vector> _inputs;
/**
 * Evidence outputs; passed together with _inputs to the hyperparameter
 * precalculation, so entry i corresponds to _inputs[i].
 */
Eigen::VectorXd _outputs;
/**
 * Number of input dimensions.
 */
size_t _dims;
/**
 * Current smallest evidence output.
 */
double _evidenceMinValue;
/**
 * Current greatest evidence output.
 */
double _evidenceMaxValue;
/**
 * Current greatest evidence input.
 */
Vector _evidenceMaxVector;
/**
 * Fixed noise assumed.
 */
const double _sigma;
/**
 * Sampled hyperparameters including precalculated matrices and score
 */
std::vector<GaussianHyperparameters> _hypers;
/**
 * Random number generator used when sampling new hyperparameters.
 */
Random &_rng;
};
} // namespace autopas
|
ompParallelLoopDirective.h | #ifndef SINGLETHREADED
// Parallelize the immediately following for-loop over nThreads OpenMP threads;
// compiled out entirely in single-threaded builds (see the #ifndef above).
#pragma omp parallel for num_threads(nThreads)
#endif
|
GB_emult_03.c | //------------------------------------------------------------------------------
// GB_emult_03: C<M>= A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// C<M>= A.*B, M sparse/hyper, A and B bitmap/full. C has the same sparsity
// structure as M, and its pattern is a subset of M.
// ------------------------------------------
// C <M>= A .* B
// ------------------------------------------
// sparse sparse bitmap bitmap (method: 03)
// sparse sparse bitmap full (method: 03)
// sparse sparse full bitmap (method: 03)
// sparse sparse full full (method: 03)
// TODO: this function can also do eWiseAdd, just as easily.
// Just change the "&&" to "||" in the GB_emult_03_template.
// If A and B are both full, eadd and emult are identical.
#include "GB_ewise.h"
#include "GB_emult.h"
#include "GB_binop.h"
#include "GB_unused.h"
#ifndef GBCOMPACT
#include "GB_binop__include.h"
#endif
// free the temporary workspace: the worker arrays and the slicing of M
#define GB_FREE_WORK                        \
{                                           \
    GB_WERK_POP (Work, int64_t) ;           \
    GB_WERK_POP (M_ek_slicing, int64_t) ;   \
}

// free everything, including the output matrix C (used on error paths)
#define GB_FREE_ALL                         \
{                                           \
    GB_FREE_WORK ;                          \
    GB_phbix_free (C) ;                     \
}
GrB_Info GB_emult_03        // C<M>=A.*B, M sparse/hyper, A and B bitmap/full
(
    GrB_Matrix C,           // output matrix, static header
    const GrB_Type ctype,   // type of output matrix C
    const bool C_is_csc,    // format of output matrix C
    const GrB_Matrix M,     // sparse/hyper, not NULL
    const bool Mask_struct, // if true, use the only structure of M
    bool *mask_applied,     // if true, the mask was applied
    const GrB_Matrix A,     // input A matrix (bitmap/full)
    const GrB_Matrix B,     // input B matrix (bitmap/full)
    const GrB_BinaryOp op,  // op to perform C = op (A,B)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (C != NULL && C->static_header) ;

    ASSERT_MATRIX_OK (M, "M for emult_03", GB0) ;
    ASSERT_MATRIX_OK (A, "A for emult_03", GB0) ;
    ASSERT_MATRIX_OK (B, "B for emult_03", GB0) ;
    ASSERT_BINARYOP_OK (op, "op for emult_03", GB0) ;

    ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (GB_JUMBLED_OK (M)) ;
    ASSERT (!GB_ZOMBIES (M)) ;

    ASSERT (GB_IS_BITMAP (A) || GB_IS_FULL (A) || GB_as_if_full (A)) ;
    ASSERT (GB_IS_BITMAP (B) || GB_IS_FULL (B) || GB_as_if_full (B)) ;

    // C takes the same sparsity structure as the mask M
    int C_sparsity = GB_sparsity (M) ;

    GBURBLE ("emult_03:(%s<%s>=%s.*%s) ",
        GB_sparsity_char (C_sparsity),
        GB_sparsity_char_matrix (M),
        GB_sparsity_char_matrix (A),
        GB_sparsity_char_matrix (B)) ;

    //--------------------------------------------------------------------------
    // declare workspace
    //--------------------------------------------------------------------------

    GB_WERK_DECLARE (Work, int64_t) ;
    int64_t *restrict Wfirst = NULL ;
    int64_t *restrict Wlast = NULL ;
    int64_t *restrict Cp_kfirst = NULL ;
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;

    //--------------------------------------------------------------------------
    // get M, A, and B
    //--------------------------------------------------------------------------

    const int64_t *restrict Mp = M->p ;
    const int64_t *restrict Mh = M->h ;
    const int64_t *restrict Mi = M->i ;
    // Mx is NULL for a structural mask: every entry of M is then treated as true
    const GB_void *restrict Mx = (Mask_struct) ? NULL : (GB_void *) M->x ;
    const int64_t vlen = M->vlen ;
    const int64_t vdim = M->vdim ;
    const int64_t nvec = M->nvec ;
    const int64_t mnz = GB_NNZ (M) ;
    const size_t msize = M->type->size ;

    const int8_t *restrict Ab = A->b ;
    const int8_t *restrict Bb = B->b ;

    //--------------------------------------------------------------------------
    // allocate C->p and C->h
    //--------------------------------------------------------------------------

    GB_OK (GB_new (&C, true, // sparse or hyper (same as M), static header
        ctype, vlen, vdim, GB_Ap_calloc, C_is_csc,
        C_sparsity, M->hyper_switch, nvec, Context)) ;
    int64_t *restrict Cp = C->p ;

    //--------------------------------------------------------------------------
    // slice the mask matrix M
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int M_ntasks, M_nthreads ;
    // defines kfirst_Mslice, klast_Mslice, and pstart_Mslice used below
    GB_SLICE_MATRIX (M, 8, chunk) ;

    //--------------------------------------------------------------------------
    // allocate workspace
    --------------------------------------------------------------------------

    GB_WERK_PUSH (Work, 3*M_ntasks, int64_t) ;
    if (Work == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }
    Wfirst    = Work ;
    Wlast     = Work + M_ntasks ;
    Cp_kfirst = Work + M_ntasks * 2 ;

    //--------------------------------------------------------------------------
    // count entries in C
    //--------------------------------------------------------------------------

    // This phase is very similar to GB_select_phase1 (GB_ENTRY_SELECTOR).

    // TODO: if M is structural and A and B are both full, then C has exactly
    // the same pattern as M, the first phase can be skipped.

    // Wfirst [tid] and Wlast [tid] hold the counts for the first and last
    // vectors of task tid, which may be shared with neighboring tasks and are
    // therefore merged separately by GB_ek_slice_merge1/merge2 below.
    int tid ;
    #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < M_ntasks ; tid++)
    {
        int64_t kfirst = kfirst_Mslice [tid] ;
        int64_t klast  = klast_Mslice  [tid] ;
        Wfirst [tid] = 0 ;
        Wlast  [tid] = 0 ;
        for (int64_t k = kfirst ; k <= klast ; k++)
        {
            // count the entries in C(:,j)
            int64_t j = GBH (Mh, k) ;
            int64_t pstart = j * vlen ;     // start of A(:,j) and B(:,j)
            int64_t pM, pM_end ;
            GB_get_pA (&pM, &pM_end, tid, k,
                kfirst, klast, pstart_Mslice, Mp, vlen) ;
            int64_t cjnz = 0 ;
            for ( ; pM < pM_end ; pM++)
            {
                bool mij = GB_mcast (Mx, pM, msize) ;
                if (mij)
                {
                    // C(i,j) exists only where both A(i,j) and B(i,j) exist
                    int64_t i = Mi [pM] ;
                    cjnz +=
                        (GBB (Ab, pstart + i)
                        &&  // TODO: for GB_add, use || instead
                        GBB (Bb, pstart + i)) ;
                }
            }
            if (k == kfirst)
            {
                Wfirst [tid] = cjnz ;
            }
            else if (k == klast)
            {
                Wlast [tid] = cjnz ;
            }
            else
            {
                Cp [k] = cjnz ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // finalize Cp, cumulative sum of Cp and compute Cp_kfirst
    //--------------------------------------------------------------------------

    GB_ek_slice_merge1 (Cp, Wfirst, Wlast, M_ek_slicing, M_ntasks) ;
    GB_ek_slice_merge2 (&(C->nvec_nonempty), Cp_kfirst, Cp, nvec,
        Wfirst, Wlast, M_ek_slicing, M_ntasks, M_nthreads, Context) ;

    //--------------------------------------------------------------------------
    // allocate C->i and C->x
    //--------------------------------------------------------------------------

    int64_t cnz = Cp [nvec] ;
    GB_OK (GB_bix_alloc (C, cnz, false, false, true, true, Context)) ;

    //--------------------------------------------------------------------------
    // copy pattern into C
    --------------------------------------------------------------------------

    // TODO: could make these components of C shallow instead

    if (GB_IS_HYPERSPARSE (M))
    {
        // copy M->h into C->h
        GB_memcpy (C->h, Mh, nvec * sizeof (int64_t), M_nthreads) ;
    }

    C->nvec = nvec ;
    C->jumbled = M->jumbled ;
    C->magic = GB_MAGIC ;

    //--------------------------------------------------------------------------
    // get the opcode
    //--------------------------------------------------------------------------

    GB_Opcode opcode = op->opcode ;
    bool op_is_positional = GB_OPCODE_IS_POSITIONAL (opcode) ;
    bool op_is_first  = (opcode == GB_FIRST_opcode) ;
    bool op_is_second = (opcode == GB_SECOND_opcode) ;
    bool op_is_pair   = (opcode == GB_PAIR_opcode) ;
    GB_Type_code ccode = ctype->code ;

    //--------------------------------------------------------------------------
    // check if the values of A and/or B are ignored
    //--------------------------------------------------------------------------

    // With C = ewisemult (A,B), only the intersection of A and B is used.
    // If op is SECOND or PAIR, the values of A are never accessed.
    // If op is FIRST  or PAIR, the values of B are never accessed.
    // If op is PAIR, the values of A and B are never accessed.
    // Contrast with ewiseadd.

    // A is passed as x, and B as y, in z = op(x,y)
    bool A_is_pattern = op_is_second || op_is_pair || op_is_positional ;
    bool B_is_pattern = op_is_first  || op_is_pair || op_is_positional ;

    //--------------------------------------------------------------------------
    // using a built-in binary operator (except for positional operators)
    //--------------------------------------------------------------------------

    bool done = false ;

    #ifndef GBCOMPACT

        //----------------------------------------------------------------------
        // define the worker for the switch factory
        //----------------------------------------------------------------------

        #define GB_AemultB_03(mult,xname) GB (_AemultB_03_ ## mult ## xname)

        #define GB_BINOP_WORKER(mult,xname)                                 \
        {                                                                   \
            info = GB_AemultB_03(mult,xname) (C, M, Mask_struct, A, B,      \
                Cp_kfirst, M_ek_slicing, M_ntasks, M_nthreads) ;            \
            done = (info != GrB_NO_VALUE) ;                                 \
        }                                                                   \
        break ;

        //----------------------------------------------------------------------
        // launch the switch factory
        //----------------------------------------------------------------------

        GB_Type_code xcode, ycode, zcode ;
        if (!op_is_positional &&
            GB_binop_builtin (A->type, A_is_pattern, B->type, B_is_pattern,
            op, false, &opcode, &xcode, &ycode, &zcode) && ccode == zcode)
        {
            #include "GB_binop_factory.c"
        }

    #endif

    //--------------------------------------------------------------------------
    // generic worker
    //--------------------------------------------------------------------------

    if (!done)
    {
        GB_BURBLE_MATRIX (C, "(generic emult_03: %s) ", op->name) ;
        GB_ewise_generic (C, op, NULL, 0, 0,
            NULL, NULL, NULL, C_sparsity, GB_EMULT_METHOD_03, Cp_kfirst,
            M_ek_slicing, M_ntasks, M_nthreads, NULL, 0, 0, NULL, 0, 0,
            M, Mask_struct, false, A, B, Context) ;
    }

    //--------------------------------------------------------------------------
    // remove empty vectors from C, if hypersparse
    //--------------------------------------------------------------------------

    GB_OK (GB_hypermatrix_prune (C, Context)) ;

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    ASSERT_MATRIX_OK (C, "C output for emult_03", GB0) ;
    // the mask was folded into the computation of C, so the caller must not apply it again
    (*mask_applied) = true ;
    return (GrB_SUCCESS) ;
}
|
updater_basemaker-inl.h | /*!
* Copyright 2014 by Contributors
* \file updater_basemaker-inl.h
* \brief implement a common tree constructor
* \author Tianqi Chen
*/
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/tree_updater.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include "./param.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"
namespace xgboost {
namespace tree {
/*!
* \brief base tree maker class that defines common operation
* needed in tree making
*/
class BaseMaker: public TreeUpdater {
public:
// Configure this updater from name-value argument pairs (tree training parameters).
void Configure(const Args& args) override {
  param_.InitAllowUnknown(args);
}
protected:
// helper to collect and query feature meta information
struct FMetaHelper {
 public:
  /*! \brief find type of each feature, use column format */
  inline void InitByCol(DMatrix* p_fmat,
                        const RegTree& tree) {
    fminmax_.resize(tree.param.num_feature * 2);
    std::fill(fminmax_.begin(), fminmax_.end(),
              -std::numeric_limits<bst_float>::max());
    // start accumulating statistics
    for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
      for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
        auto c = batch[fid];
        if (c.size() != 0) {
          CHECK_LT(fid * 2, fminmax_.size());
          // columns are sorted: c[0] is the column minimum, c[c.size()-1] the maximum.
          // The minimum is stored negated so both slots can use max-reduction.
          fminmax_[fid * 2 + 0] =
              std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
          fminmax_[fid * 2 + 1] =
              std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
        }
      }
    }
  }
  /*! \brief synchronize the information across all workers (max-reduce) */
  inline void SyncInfo() {
    rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
  }
  // get feature type, 0:empty 1:binary 2:real
  inline int Type(bst_uint fid) const {
    CHECK_LT(fid * 2 + 1, fminmax_.size())
        << "FeatHelper fid exceed query bound ";
    bst_float a = fminmax_[fid * 2];      // -min of the feature
    bst_float b = fminmax_[fid * 2 + 1];  // max of the feature
    // still at the fill value: no entry was ever seen for this feature
    if (a == -std::numeric_limits<bst_float>::max()) return 0;
    // -a == b means min == max, i.e. a single distinct value
    if (-a == b) {
      return 1;
    } else {
      return 2;
    }
  }
  inline bst_float MaxValue(bst_uint fid) const {
    return fminmax_[fid *2 + 1];
  }
  /*! \brief randomly sample a fraction p of the non-empty features; the chosen
   *  set is broadcast from rank 0 so all workers use the same subset */
  inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const {
    std::vector<bst_uint> &findex = *p_findex;
    findex.clear();
    for (size_t i = 0; i < fminmax_.size(); i += 2) {
      const auto fid = static_cast<bst_uint>(i / 2);
      if (this->Type(fid) != 0) findex.push_back(fid);
    }
    auto n = static_cast<unsigned>(p * findex.size());
    std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
    findex.resize(n);
    // sync the findex if it is subsample
    std::string s_cache;
    common::MemoryBufferStream fc(&s_cache);
    dmlc::Stream& fs = fc;
    if (rabit::GetRank() == 0) {
      fs.Write(findex);
    }
    rabit::Broadcast(&s_cache, 0);
    fs.Read(&findex);
  }

 private:
  // per-feature statistics, two slots per feature:
  // fminmax_[2*fid]   = max over the column of -fvalue (the negated minimum)
  // fminmax_[2*fid+1] = max over the column of fvalue
  std::vector<bst_float> fminmax_;
};
// ------static helper functions ------
// helper function to get to next level of the tree
/*! \brief helper function for row based data: route one instance to the next tree level */
inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
  const RegTree::Node &node = tree[nid];
  const bst_uint splitIndex = node.SplitIndex();
  // scan the sparse instance for the split feature
  for (const auto &entry : inst) {
    if (entry.index == splitIndex) {
      // feature present: branch according to the split condition
      return entry.fvalue < node.SplitCond() ? node.LeftChild() : node.RightChild();
    }
  }
  // feature missing: follow the default direction
  return node.DefaultChild();
}
// ------class member helpers---------
/*! \brief initialize temp data structure: per-row positions and the expand queue */
inline void InitData(const std::vector<GradientPair> &gpair,
                     const DMatrix &fmat,
                     const RegTree &tree) {
  CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
      << "TreeMaker: can only grow new tree";
  const std::vector<unsigned> &root_index = fmat.Info().root_index_;
  {
    // setup position
    position_.resize(gpair.size());
    if (root_index.size() == 0) {
      // no explicit root assignment: every row starts at node 0
      std::fill(position_.begin(), position_.end(), 0);
    } else {
      for (size_t i = 0; i < position_.size(); ++i) {
        position_[i] = root_index[i];
        CHECK_LT(root_index[i], (unsigned)tree.param.num_roots)
            << "root index exceed setting";
      }
    }
    // mark delete for the deleted datas
    // (negative hessian marks a removed row; ~nid encodes "discarded")
    for (size_t i = 0; i < position_.size(); ++i) {
      if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
    }
    // mark subsample: rows losing the coin flip are likewise encoded as ~nid
    if (param_.subsample < 1.0f) {
      std::bernoulli_distribution coin_flip(param_.subsample);
      auto& rnd = common::GlobalRandom();
      for (size_t i = 0; i < position_.size(); ++i) {
        if (gpair[i].GetHess() < 0.0f) continue;
        if (!coin_flip(rnd)) position_[i] = ~position_[i];
      }
    }
  }
  {
    // expand query: initially all roots are queued for expansion
    qexpand_.reserve(256); qexpand_.clear();
    for (int i = 0; i < tree.param.num_roots; ++i) {
      qexpand_.push_back(i);
    }
    this->UpdateNode2WorkIndex(tree);
  }
}
/*! \brief replace the expand queue with the children of all freshly split nodes */
inline void UpdateQueueExpand(const RegTree &tree) {
  std::vector<int> nextLevel;
  for (int nid : qexpand_) {
    if (tree[nid].IsLeaf()) continue;
    // the node was split: queue both children for expansion
    nextLevel.push_back(tree[nid].LeftChild());
    nextLevel.push_back(tree[nid].RightChild());
  }
  qexpand_ = std::move(nextLevel);
  this->UpdateNode2WorkIndex(tree);
}
// Return the decoded node id for row ridx: negative entries encode a
// "discarded" row via bit-complement, so undo that encoding here.
inline int DecodePosition(bst_uint ridx) const {
  const int pid = position_[ridx];
  if (pid < 0) {
    return ~pid;
  }
  return pid;
}
// Store node id nid for row ridx, preserving the row's "discarded" state:
// rows currently encoded as negative stay bit-complemented.
inline void SetEncodePosition(bst_uint ridx, int nid) {
  position_[ridx] = (position_[ridx] < 0) ? ~nid : nid;
}
/*!
 * \brief this is helper function uses column based data structure,
 *  reset the positions to the latest one
 * \param nodes the set of nodes that contains the split to be used
 * \param p_fmat feature matrix needed for tree construction
 * \param tree the regression tree structure
 */
inline void ResetPositionCol(const std::vector<int> &nodes,
                             DMatrix *p_fmat,
                             const RegTree &tree) {
  // first route rows whose split feature is present (non-default direction),
  // then push the remaining rows down the default branch
  this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
  this->SetDefaultPostion(p_fmat, tree);
}
/*!
 * \brief helper function to set the non-leaf positions to default direction.
 *  This function can be applied multiple times and will get the same result.
 *  (Note: the name's missing 'i' is kept for interface compatibility.)
 * \param p_fmat feature matrix needed for tree construction
 * \param tree the regression tree structure
 */
inline void SetDefaultPostion(DMatrix *p_fmat,
                              const RegTree &tree) {
  // set default direct nodes to default
  // for leaf nodes that are not fresh, mark them to ~nid,
  // so that they are ignored in future statistics collection
  const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);

  #pragma omp parallel for schedule(static)
  for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
    const int nid = this->DecodePosition(ridx);
    if (tree[nid].IsLeaf()) {
      // mark finish when it is not a fresh leaf
      if (tree[nid].RightChild() == -1) {
        position_[ridx] = ~nid;
      }
    } else {
      // push to default branch
      if (tree[nid].DefaultLeft()) {
        this->SetEncodePosition(ridx, tree[nid].LeftChild());
      } else {
        this->SetEncodePosition(ridx, tree[nid].RightChild());
      }
    }
  }
}
/*!
 * \brief this is helper function uses column based data structure,
 *  to CORRECT the positions of non-default directions that WAS set to default
 *  before calling this function.
 * \param batch The column batch
 * \param sorted_split_set The set of index that contains split solutions.
 * \param tree the regression tree structure
 */
inline void CorrectNonDefaultPositionByBatch(
    const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
    const RegTree &tree) {
  for (size_t fid = 0; fid < batch.Size(); ++fid) {
    auto col = batch[fid];
    // only process features that are actually used as a split somewhere
    auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);
    if (it != sorted_split_set.end() && *it == fid) {
      const auto ndata = static_cast<bst_omp_uint>(col.size());
      #pragma omp parallel for schedule(static)
      for (bst_omp_uint j = 0; j < ndata; ++j) {
        const bst_uint ridx = col[j].index;
        const bst_float fvalue = col[j].fvalue;
        const int nid = this->DecodePosition(ridx);
        CHECK(tree[nid].IsLeaf());
        int pid = tree[nid].Parent();
        // go back to parent, correct those who are not default
        if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
          if (fvalue < tree[pid].SplitCond()) {
            this->SetEncodePosition(ridx, tree[pid].LeftChild());
          } else {
            this->SetEncodePosition(ridx, tree[pid].RightChild());
          }
        }
      }
    }
  }
}
/*!
 * \brief collect the sorted, de-duplicated set of split feature indices used by the given nodes
 * \param nodes the set of nodes that contains the split to be used
 * \param tree the regression tree structure
 * \param out_split_set The split index set
 */
inline void GetSplitSet(const std::vector<int> &nodes,
                        const RegTree &tree,
                        std::vector<unsigned>* out_split_set) {
  std::vector<unsigned>& fsplits = *out_split_set;
  fsplits.clear();
  // gather the split feature of every non-leaf node
  for (int nid : nodes) {
    if (!tree[nid].IsLeaf()) {
      fsplits.push_back(tree[nid].SplitIndex());
    }
  }
  // sort and remove duplicates (erase-unique idiom)
  std::sort(fsplits.begin(), fsplits.end());
  fsplits.erase(std::unique(fsplits.begin(), fsplits.end()), fsplits.end());
}
/*!
 * \brief this is helper function uses column based data structure,
 *  update all positions into nondefault branch, if any, ignore the default branch
 * \param nodes the set of nodes that contains the split to be used
 * \param p_fmat feature matrix needed for tree construction
 * \param tree the regression tree structure
 */
virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
                                      DMatrix *p_fmat,
                                      const RegTree &tree) {
  // step 1: collect the split features used by the given nodes
  std::vector<unsigned> fsplits;
  this->GetSplitSet(nodes, tree, &fsplits);

  // step 2: for every row with a present split-feature value, route it left/right
  for (const auto &batch : p_fmat->GetBatches<SortedCSCPage>()) {
    for (auto fid : fsplits) {
      auto col = batch[fid];
      const auto ndata = static_cast<bst_omp_uint>(col.size());
      #pragma omp parallel for schedule(static)
      for (bst_omp_uint j = 0; j < ndata; ++j) {
        const bst_uint ridx = col[j].index;
        const bst_float fvalue = col[j].fvalue;
        const int nid = this->DecodePosition(ridx);
        // go back to parent, correct those who are not default
        if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
          if (fvalue < tree[nid].SplitCond()) {
            this->SetEncodePosition(ridx, tree[nid].LeftChild());
          } else {
            this->SetEncodePosition(ridx, tree[nid].RightChild());
          }
        }
      }
    }
  }
}
/*! \brief helper function to accumulate per-node gradient statistics from a tree */
template<typename TStats>
inline void GetNodeStats(const std::vector<GradientPair> &gpair,
                         const DMatrix &fmat,
                         const RegTree &tree,
                         std::vector< std::vector<TStats> > *p_thread_temp,
                         std::vector<TStats> *p_node_stats) {
  std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
  thread_temp.resize(omp_get_max_threads());
  p_node_stats->resize(tree.param.num_nodes);
  // reset each thread's private accumulator for the nodes being expanded
  #pragma omp parallel
  {
    const int tid = omp_get_thread_num();
    thread_temp[tid].resize(tree.param.num_nodes, TStats());
    for (unsigned int nid : qexpand_) {
      thread_temp[tid][nid] = TStats();
    }
  }
  // accumulate per-thread to avoid synchronization; rows with negative
  // (discarded) positions are skipped
  const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_);
  #pragma omp parallel for schedule(static)
  for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
    const int nid = position_[ridx];
    const int tid = omp_get_thread_num();
    if (nid >= 0) {
      thread_temp[tid][nid].Add(gpair[ridx]);
    }
  }
  // sum the per thread statistics together
  for (int nid : qexpand_) {
    TStats &s = (*p_node_stats)[nid];
    s = TStats();
    for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
      s.Add(thread_temp[tid][nid]);
    }
  }
}
/*! \brief common helper data structure to build a weighted quantile sketch */
struct SketchEntry {
  /*! \brief total sum of amount to be met */
  double sum_total;
  /*! \brief statistics used in the sketch */
  double rmin, wmin;
  /*! \brief last seen feature value */
  bst_float last_fvalue;
  /*! \brief next cumulative-rank target at which an entry is pushed to the sketch
   *  (the previous comment, "current size of sketch", was incorrect) */
  double next_goal;
  // pointer to the sketch to put things in
  common::WXQuantileSketch<bst_float, bst_float> *sketch;
  // initialize the space
  inline void Init(unsigned max_size) {
    next_goal = -1.0f;
    rmin = wmin = 0.0f;
    sketch->temp.Reserve(max_size + 1);
    sketch->temp.size = 0;
  }
  /*!
   * \brief push a new element to sketch
   * \param fvalue feature value, comes in sorted ascending order
   * \param w weight
   * \param max_size maximum number of entries the sketch may hold
   */
  inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
    if (next_goal == -1.0f) {
      // first element: just remember it
      next_goal = 0.0f;
      last_fvalue = fvalue;
      wmin = w;
      return;
    }
    if (last_fvalue != fvalue) {
      double rmax = rmin + wmin;
      if (rmax >= next_goal && sketch->temp.size != max_size) {
        if (sketch->temp.size == 0 ||
            last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
          // push to sketch
          sketch->temp.data[sketch->temp.size] =
              common::WXQuantileSketch<bst_float, bst_float>::
              Entry(static_cast<bst_float>(rmin),
                    static_cast<bst_float>(rmax),
                    static_cast<bst_float>(wmin), last_fvalue);
          // fixed message: "stemp.size" was missing its '=' separator
          CHECK_LT(sketch->temp.size, max_size)
              << "invalid maximum size max_size=" << max_size
              << ", stemp.size=" << sketch->temp.size;
          ++sketch->temp.size;
        }
        if (sketch->temp.size == max_size) {
          // sketch full: set the goal beyond sum_total so nothing more is pushed
          next_goal = sum_total * 2.0f + 1e-5f;
        } else {
          next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
        }
      } else {
        if (rmax >= next_goal) {
          // fixed typo in log message: "naxt_goal" -> "next_goal"
          LOG(TRACKER) << "INFO: rmax=" << rmax
                       << ", sum_total=" << sum_total
                       << ", next_goal=" << next_goal
                       << ", size=" << sketch->temp.size;
        }
      }
      rmin = rmax;
      wmin = w;
      last_fvalue = fvalue;
    } else {
      // same value as before: only accumulate its weight
      wmin += w;
    }
  }
  /*! \brief push final unfinished value to the sketch */
  inline void Finalize(unsigned max_size) {
    double rmax = rmin + wmin;
    if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
      CHECK_LE(sketch->temp.size, max_size)
          << "Finalize: invalid maximum size, max_size=" << max_size
          << ", stemp.size=" << sketch->temp.size;
      // push to sketch
      sketch->temp.data[sketch->temp.size] =
          common::WXQuantileSketch<bst_float, bst_float>::
          Entry(static_cast<bst_float>(rmin),
                static_cast<bst_float>(rmax),
                static_cast<bst_float>(wmin), last_fvalue);
      ++sketch->temp.size;
    }
    sketch->PushTemp();
  }
};
/*! \brief training parameter of tree grower */
TrainParam param_;
/*! \brief queue of nodes to be expanded */
std::vector<int> qexpand_;
/*!
 * \brief map active node to its working index offset in qexpand,
 *  can be -1, which means the node is not actively expanding
 */
std::vector<int> node2workindex_;
/*!
 * \brief position of each instance in the tree
 *  can be negative, which means this position is no longer expanding
 *  see also Decode/EncodePosition
 */
std::vector<int> position_;
private:
// Rebuild the node-id -> qexpand-offset lookup table; every node not
// currently in qexpand_ maps to -1.
inline void UpdateNode2WorkIndex(const RegTree &tree) {
  // Resize BEFORE filling: the original filled first, so slots added by the
  // resize were value-initialized to 0 (a valid work index) instead of the
  // documented -1 — on the first call the fill covered an empty vector and
  // every unassigned node mapped to 0.
  node2workindex_.resize(tree.param.num_nodes);
  std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
  for (size_t i = 0; i < qexpand_.size(); ++i) {
    node2workindex_[qexpand_[i]] = static_cast<int>(i);
  }
}
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
|
setup.h | //////////////////////////////////////////////////////////////////////////////////
// //
// trueke //
// A multi-GPU implementation of the exchange Monte Carlo method. //
// //
//////////////////////////////////////////////////////////////////////////////////
// //
// Copyright © 2015 Cristobal A. Navarro, Wei Huang. //
// //
// This file is part of trueke. //
// trueke is free software: you can redistribute it and/or modify //
// it under the terms of the GNU General Public License as published by //
// the Free Software Foundation, either version 3 of the License, or //
// (at your option) any later version. //
// //
// trueke is distributed in the hope that it will be useful, //
// but WITHOUT ANY WARRANTY; without even the implied warranty of //
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the //
// GNU General Public License for more details. //
// //
// You should have received a copy of the GNU General Public License //
// along with trueke. If not, see <http://www.gnu.org/licenses/>. //
// //
//////////////////////////////////////////////////////////////////////////////////
#ifndef _SETUP_H_
#define _SETUP_H_
/* function declarations */
void pickgpus(setup_t *s);
/* fixed: argv must be `char **` to match the definitions
   (was `int argv` for init and `char argv` for adapt_init,
   mismatching e.g. the adapt_init definition below) */
void init(setup_t *s, int argc, char **argv);
void adapt_init(setup_t *s, int argc, char **argv);
void printparams(setup_t *s);
void getparams(setup_t *s, int argc, char **argv);
void newseed(int *seed);
void malloc_arrays(setup_t *s);
void adapt_malloc_arrays(setup_t *s);
void reset(setup_t *s);
void adjustparams(setup_t *s);
/* adapt init: parse parameters, seed the PRNG, pick GPUs, build the CUDA
 * computation grids and allocate the adaptation arrays. */
void adapt_init(setup_t *s, int argc, char **argv){
    printf("adapt_init....{\n");
    fflush (stdout);
    /* get parameters */
    getparams(s, argc, argv);
    /* adjust some parameters related to memory pool and active replicas*/
    adjustparams(s);
#ifdef MEASURE
    /* folders for output */
    s->obsfolder = "data";
    s->plotfolder = "plots";
    make_output_folders(s->obsfolder, s->plotfolder);
#endif
    /* parameter seed or random seed (seed == 0 means "use a device seed") */
    if(s->seed != 0){
        gpu_pcg32_srandom_r(&s->hpcgs, &s->hpcgi, s->seed, 1);
    }
    else{
        gpu_pcg32_srandom_r(&s->hpcgs, &s->hpcgi, devseed(), 1);
    }
    /* store a freshly generated value as the effective seed */
    s->seed = gpu_pcg32_random_r(&s->hpcgs, &s->hpcgi);
    /* pick the GPUs */
    pickgpus(s);
    /* set the number of threads as the number of GPUs (one host thread per GPU) */
    omp_set_num_threads(s->ngpus);
    /* build the space of computation for the lattices */
    s->mcblock = dim3(BX, BY / 2, BZ);
    s->mcgrid = dim3((s->L + BX - 1) / BX, (s->L + BY - 1) / (2 * BY),
                     (s->L + BZ - 1) / BZ);
    s->lblock = dim3(BLOCKSIZE1D, 1, 1);
    s->lgrid = dim3((s->N + BLOCKSIZE1D - 1) / BLOCKSIZE1D, 1, 1);
    /* build the space of computation for random numbers and lattice simulation */
    s->prng_block = dim3(BLOCKSIZE1D, 1, 1);
    s->prng_grid = dim3(((s->N / 4) + BLOCKSIZE1D - 1) / BLOCKSIZE1D, 1, 1);
    /* allocate main arrays */
    adapt_malloc_arrays(s);
    /* create timers */
    sdkCreateTimer(&(s->timer));
    sdkCreateTimer(&(s->gtimer));
    sdkCreateTimer(&(s->ktimer));
    /* reset timers */
    sdkResetTimer(&(s->timer));
    sdkResetTimer(&(s->gtimer));
    sdkResetTimer(&(s->ktimer));
    /* print parameters */
    printparams(s);
    //printf("}:ok\n\n");
    fflush(stdout);
}
/* adapt malloc: allocate every host/device array used by the adaptive run.
 * Top-level pointers are sized by ngpus; inside the OpenMP region each thread
 * (one per GPU, see adapt_threadset) allocates its own replica pool of size
 * rpool[tid] and launches the PRNG-state setup kernel for each replica.
 * NOTE(review): malloc return values are unchecked throughout; aavex/aexE are
 * allocated but not initialized here -- presumably done in reset(); confirm. */
void adapt_malloc_arrays( setup_t *s ){
/* multi-gpu adaptation arrays */
s->mdlat = (int***) malloc(sizeof(int**) * s->ngpus);
s->aex = (float**)malloc(sizeof(float*)*s->ngpus);
s->aavex = (float**)malloc(sizeof(float*)*s->ngpus);
s->aexE = (float**)malloc(sizeof(float*)*s->ngpus);
s->arstream = (cudaStream_t**)malloc(sizeof(cudaStream_t*) * s->ngpus);
s->apcga = (uint64_t***)malloc(sizeof(uint64_t**) * s->ngpus);
s->apcgb = (uint64_t***)malloc(sizeof(uint64_t**) * s->ngpus);
s->dH = (int **)malloc(sizeof(int*) * s->ngpus);
s->dE = (float**)malloc(sizeof(float*) * s->ngpus);
s->arts = (findex_t**)malloc(sizeof(findex_t*) * s->ngpus);
s->atrs = (findex_t**)malloc(sizeof(findex_t*) * s->ngpus);
s->aT = (float**)malloc(sizeof(float*)*s->ngpus);
/* T is a sorted temp array (sized Ra: active replicas + adaptation headroom) */
s->T = (float*)malloc(sizeof(float)*s->Ra);
/* host values for each replica */
s->E = (float*)malloc(sizeof(float)*s->Ra);
// memory for H array
s->hH = (int*)malloc(sizeof(int) * s->N);
/* multi-GPU setup: one OpenMP thread per GPU */
#pragma omp parallel
{
int tid, nt, r;
/* set threads */
adapt_threadset(s, &tid, &nt, &r);
//printf("arge malloc: tid=%i r=%i rpool = %i\n", tid, r, s->rpool[tid]); fflush(stdout);
/* allocate the replica pool for each GPU */
s->mdlat[tid] = (int**) malloc(sizeof(int *) * s->rpool[tid]);
/* ex is a per temperature counter array */
s->aex[tid] = (float*)malloc(sizeof(float)*s->rpool[tid]);
/* avex is a per temperature counter array */
s->aavex[tid] = (float*)malloc(sizeof(float)*s->rpool[tid]);
/* exchange energies */
s->aexE[tid] = (float*)malloc(sizeof(float) * s->rpool[tid]);
/* CUDA streams: one per pooled replica so replicas can run concurrently */
s->arstream[tid] = (cudaStream_t*)malloc(sizeof(cudaStream_t) * s->rpool[tid]);
/* PRNG states volume, one state per thread */
s->apcga[tid] = (uint64_t**)malloc(sizeof(uint64_t*) * s->rpool[tid]);
s->apcgb[tid] = (uint64_t**)malloc(sizeof(uint64_t*) * s->rpool[tid]);
/* fragmented indices for replicas temperature sorted */
s->arts[tid] = (findex_t*)malloc(sizeof(findex_t)*s->rpool[tid]);
/* fragmented indices for temperatures replica sorted */
s->atrs[tid] = (findex_t*)malloc(sizeof(findex_t)*s->rpool[tid]);
/* fragmented temperatures sorted */
s->aT[tid] = (float*)malloc(sizeof(float)*s->rpool[tid]);
/* malloc device magnetic field -- multi-GPU */
checkCudaErrors(cudaMalloc(&(s->dH[tid]), sizeof(int)*s->N));
/* malloc device energy reductions -- multi-GPU*/
checkCudaErrors(cudaMalloc(&(s->dE[tid]), sizeof(float)*s->rpool[tid]));
/* malloc the data for 'r' replicas on each GPU; each replica gets a lattice,
 * two PCG state arrays of N/4 states, its own stream, and a seeded PRNG */
for(int k = 0; k < s->rpool[tid]; ++k){
checkCudaErrors(cudaMalloc(&(s->mdlat[tid][k]), sizeof(int) * s->N));
checkCudaErrors(cudaMalloc(&(s->apcga[tid][k]), (s->N/4) * sizeof(uint64_t)));
checkCudaErrors(cudaMalloc(&(s->apcgb[tid][k]), (s->N/4) * sizeof(uint64_t)));
checkCudaErrors(cudaStreamCreateWithFlags(&(s->arstream[tid][k]), cudaStreamNonBlocking));
// offset and sequence approach: each replica gets a disjoint seed offset and
// its own PCG sequence id (rpool[tid]*tid + k)
kernel_gpupcg_setup<<<s->prng_grid, s->prng_block, 0, s->arstream[tid][k] >>>(s->apcga[tid][k], s->apcgb[tid][k], s->N/4, s->seed + (unsigned long long)(s->N/4 * (s->rpool[tid]*tid + k)), (s->rpool[tid]*tid + k));
//printf("tid=%i N=%i N/4 = %i R = %i seed = %lu k = %lu \n", tid, s->N, s->N/4, s->R, s->seed + (unsigned long long)(s->N/4 * (s->rpool[tid]*tid + k)), (s->rpool[tid]*tid + k));
//getchar();
// skip ahead approach
//kernel_gpupcg_setup_offset<<<s->prng_grid, s->prng_block, 0, s->arstream[tid][k] >>>(s->apcga[tid][k], s->apcgb[tid][k], s->N/4, s->seed, (unsigned long long)((s->ms * s->pts + s->ds)*4*s->realizations), (s->L^3)/4 * (s->R/s->ngpus * tid + k) );
cudaCheckErrors("kernel: prng reset");
}
}
/* host memory setup for each replica */
for(int i = 0; i < s->R; i++){
/* array of temperatures increasing order */
s->T[i] = s->TR - (s->R-1 - i)*s->dT;
}
/* initialize the fragmented (per-GPU) index/temperature/counter arrays for
 * the ACTIVE replicas only (gpur[k] of the rpool[k] slots) */
int count = 0;
for(int k = 0; k < s->ngpus; ++k){
for(int j = 0; j < s->gpur[k]; ++j){
s->arts[k][j] = s->atrs[k][j] = (findex_t){k, j};
s->aT[k][j] = s->TR - (float)(s->R-1 - count)*s->dT;
s->aex[k][j] = 0;
++count;
}
}
}
/* set parameters: derive every quantity that depends on the raw command-line
 * values -- lattice volume, rounded replica counts, pool sizes per GPU and
 * the measurement zone -- before any allocation happens. */
void adjustparams(setup_t *s){
/* total number of spins per replica (L^3 cube) */
s->N = (s->L)*(s->L)*(s->L);
/* shared memory steps */
s->cs = BLOCK_STEPS;
/* remember the user-requested replica count, then round R up so that it is
 * an exact multiple of the number of GPUs: R' = ceil(R/ngpus) * ngpus */
s->Ro = s->R;
s->R = (int)ceil((float)s->R/(float)s->ngpus) * s->ngpus;
/* final pool size: active replicas plus room for adaptive inserts */
s->Ra = s->R + (s->atrials * s->ains);
/* per-GPU bookkeeping arrays: active replicas and pooled replicas */
s->gpur = (int*)malloc(sizeof(int) * s->ngpus);
s->rpool = (int*)malloc(sizeof(int) * s->ngpus);
/* auto-pick the measure zone when it was not given on the command line */
if( s->mzone == -1 ){
s->mzone = (int) ((double)s->pts / log2(2.0 + sqrtf((double)s->pts)/(double)s->L) );
}
/* no adaptation insert performed yet */
s->fam = 0;
/* keep the original seed for the record */
s->oseed = s->seed;
/* distribute replicas: every GPU gets R/ngpus active replicas (exact, since
 * R is a multiple of ngpus); the pool remainder Ra % ngpus goes one-each to
 * the first GPUs */
for(int g = 0; g < s->ngpus; ++g){
s->gpur[g] = s->R / s->ngpus;
s->rpool[g] = s->Ra / s->ngpus + ((g < (s->Ra % s->ngpus)) ? 1 : 0);
}
}
/* init: per-realization (re)initialization.
 * Rebuilds the CUDA launch geometry and allocates the per-replica arrays via
 * malloc_arrays. Seeding / thread setup are commented out here -- presumably
 * because adapt_init already performed them; confirm the call order. */
void init(setup_t *s, int argc, char **argv){
/* set the number of threads as the number of GPUs */
//omp_set_num_threads(s->ngpus);
//gpu_pcg32_srandom_r(&s->hpcgs, &s->hpcgi, s->seed, 1);
// get another seed from master seeder
//s->seed = gpu_pcg32_random_r(&s->hpcgs, &s->hpcgi);
/* build the space of computation for the lattices */
s->mcblock = dim3(BX, BY/2, BZ);
s->mcgrid = dim3((s->L + BX - 1)/BX, (s->L + BY - 1)/(2*BY), (s->L + BZ - 1)/BZ);
s->lblock = dim3( BLOCKSIZE1D, 1, 1);
s->lgrid = dim3((s->N + BLOCKSIZE1D - 1)/BLOCKSIZE1D, 1, 1);
/* build the space of computation for random numbers and lattice simulation */
s->prng_block = dim3(BLOCKSIZE1D, 1, 1);
s->prng_grid = dim3( ((s->N/4) + BLOCKSIZE1D - 1)/BLOCKSIZE1D, 1, 1);
/* allocate main arrays */
malloc_arrays(s);
/* reset table of observables per realization */
#ifdef MEASURE
reset_realization_statistics(s, s->R);
#endif
}
/* malloc arrays: allocate the flat (non-adaptive) per-replica arrays.
 * Host pointers are sized by R (total replicas); per-GPU device pointers are
 * sized by ngpus. Inside the OpenMP region each thread owns one GPU and its
 * r = R/ngpus replicas (global replica id k = tid*r + j).
 * NOTE(review): reads s->gpur and s->aT at the end -- assumes adjustparams
 * and adapt_malloc_arrays already ran; malloc results are unchecked. */
void malloc_arrays( setup_t *s ){
/* allocate the main arrays */
s->hlat = (int **)malloc(sizeof(int *) * s->R);
s->dlat = (int **)malloc(sizeof(int *) * s->R);
/* T is a sorted temp array */
s->T = (float*)malloc(sizeof(float)*s->R);
/* ex is a per temperature counter array */
s->ex = (float*)malloc(sizeof(float)*s->R);
/* avex is a per temperature counter array */
s->avex = (float*)malloc(sizeof(float)*s->R);
/* index arrays */
s->rts = (int*)malloc(sizeof(int)*s->R);
s->trs = (int*)malloc(sizeof(int)*s->R);
/* host values for each replica */
s->E = (float*)malloc(sizeof(float)*s->R);
s->exE = (float*)malloc(sizeof(float) * s->R);
s->M = (int*)malloc(sizeof(int)*s->R);
s->F1 = (float3*)malloc(sizeof(float3)*s->R);
s->F2 = (float3*)malloc(sizeof(float3)*s->R);
/* CUDA streams: one per replica */
s->rstream = (cudaStream_t*)malloc(sizeof(cudaStream_t) * s->R);
/* PRNG states volume, one state per thread */
s->pcga = (uint64_t **)malloc(sizeof(uint64_t *) * s->R);
s->pcgb = (uint64_t **)malloc(sizeof(uint64_t *) * s->R);
/* observables table */
s->obstable = (obset_t*)malloc(sizeof(obset_t)*s->R);
// memory for H array
s->hH = (int*)malloc(sizeof(int) * s->N);
/* global index of the first replica in each GPU */
/* a copy of the magnetic field 'dH' on each GPU */
s->dH = (int **)malloc(sizeof(int*) * s->ngpus);
/* device values for GPUs */
s->dE = (float**)malloc(sizeof(float*) * s->ngpus);
s->dM = (int**)malloc(sizeof(int*) * s->ngpus);
s->dF1 = (float3**)malloc(sizeof(float3*) * s->ngpus);
s->dF2 = (float3**)malloc(sizeof(float3*) * s->ngpus);
/* multi-GPU setup: one OpenMP thread per GPU */
#pragma omp parallel
{
int tid, nt, r, k;
/* set threads */
threadset(s, &tid, &nt, &r);
/* malloc the data for 'r' replicas on each GPU; k is the global replica id */
for(int j = 0; j < r; ++j){
k = tid * r + j;
checkCudaErrors(cudaMalloc(&(s->dlat[k]), sizeof(int) * s->N));
checkCudaErrors(cudaMalloc(&(s->pcga[k]), (s->N/4) * sizeof(uint64_t)));
checkCudaErrors(cudaMalloc(&(s->pcgb[k]), (s->N/4) * sizeof(uint64_t)));
checkCudaErrors(cudaStreamCreateWithFlags(&(s->rstream[k]), cudaStreamNonBlocking));
/* seed the N/4 PCG states of replica k with a disjoint seed offset */
kernel_gpupcg_setup<<<s->prng_grid, s->prng_block, 0, s->rstream[k] >>>(s->pcga[k], s->pcgb[k], s->N/4, s->seed + s->N/4 * k, k);
//printf("thread %i, N=%i N/4 = %i R = %i ngpus = %i R/ngpus = %i k = %i kN/4 = %i seed = %lu \n", tid, s->N, s->N/4, s->R, s->ngpus, s->R/s->ngpus, k, s->N/4 * k, s->seed + s->N/4*k);
//getchar();
//cudaDeviceSynchronize();
cudaCheckErrors("kernel: prng reset");
}
/* malloc device magnetic field -- multi-GPU */
checkCudaErrors(cudaMalloc(&(s->dH[tid]), sizeof(int)*s->N));
/* malloc device energy reductions -- multi-GPU*/
checkCudaErrors(cudaMalloc(&(s->dE[tid]), sizeof(float)*r));
checkCudaErrors(cudaMalloc(&(s->dM[tid]), sizeof(int)*r));
checkCudaErrors(cudaMalloc(&(s->dF1[tid]), sizeof(float3)*r));
checkCudaErrors(cudaMalloc(&(s->dF2[tid]), sizeof(float3)*r));
/* P2P memory access is not working properly, for the moment just use standard device-host-device transfers */
/* enable peer to peer memory access between GPUs */
//if(tid != 0){
//int access;
//printf("\tGPU%i PeerAccess to GPU%i.....", s->gpus[tid].i, s->gpus[0].i); fflush(stdout);
//checkCudaErrors(cudaDeviceCanAccessPeer(&access, s->gpus[tid].i, s->gpus[0].i));
//printf("%i\n", access); fflush(stdout);
//checkCudaErrors(cudaDeviceEnablePeerAccess( s->gpus[0].i, 0 ));
//}
//else{
//checkCudaErrors(cudaDeviceEnablePeerAccess( s->gpus[1].i, 0 ));
//}
}
/* host memory setup for each replica */
for(int i = 0; i < s->R; i++){
/* replica allocation */
s->hlat[i]= (int*)malloc(sizeof(int) * s->N);
/* array of temperatures increasing order */
s->T[i] = s->TR - (s->R-1 - i)*s->dT;
/* exchange counters initialization */
s->ex[i] = 0;
/* initialize index arrays */
s->rts[i] = s->trs[i] = i;
}
/* flatten the per-GPU (fragmented) temperatures into the sorted T array;
 * this overwrites the values computed in the loop above with s->aT */
int count = 0;
for(int i=0; i<s->ngpus; ++i){
for(int j=0; j<s->gpur[i]; ++j){
s->T[count++] = s->aT[i][j];
}
}
printarray<float>(s->T, s->R, "T");
printf("\n");
}
/* pick the idlest 'n' gpus */
/* Queries every device through NVML, optionally sorts them by load, and
 * records the first s->ngpus entries in s->gpus. Exits if the user requested
 * more GPUs than the machine has. */
void pickgpus( setup_t *s ){
/* structs for handling GPU queries error codes */
nvmlReturn_t r;
/* some function variables */
unsigned int devcount, i, u;
/* struct with GPU information */
gpu_t *gpus;
char version[80];
/* init nvml library for GPU queries */
r = nvmlInit();
nvml_check(r, "nvmlInit");
/* nvml: get driver version */
r = nvmlSystemGetDriverVersion(version, 80);
nvml_check(r, "nvmlSystemGetDriverVersion");
printf("\n\tDriver version: %s \n", version);
/* get number of devices */
r = nvmlDeviceGetCount(&devcount);
nvml_check(r, "nvmlDeviceGetCount");
printf("\tMAXGPUS = %d\n", devcount);
/* malloc one gpu_t struct for each device */
gpus = (gpu_t*)malloc(sizeof(gpu_t)*devcount);
/* return error if n > devcount */
if( s->ngpus > devcount){
fprintf(stderr, "pt error: [g = %i] > [MAXGPUS = %i]. (try g <= MAXGPUS)\n", s->ngpus, devcount);
exit(1);
}
/* get the information of each GPU */
printf("\tListing devices:\n");
for(i = 0; i < devcount; i++){
unsigned int index;
nvmlDevice_t dev;
char name[64];
char uuid[128];
//nvmlComputeMode_t compute_mode;
nvmlUtilization_t util;
r = nvmlDeviceGetHandleByIndex(i, &dev);
nvml_check(r, "nvmlDeviceGetHandleByIndex");
r = nvmlDeviceGetName(dev, name, sizeof(name)/sizeof(name[0]));
nvml_check(r, "nvmlDeviceGetName");
r = nvmlDeviceGetIndex(dev, &index);
r = nvmlDeviceGetUUID(dev, uuid, 128);
printf("\t\tGPU%d %s, index=%i, UUID=%s", i, name, index, uuid);
r = nvmlDeviceGetUtilizationRates(dev, &util);
/* NOTE(review): 'u' is overwritten every iteration, so after the loop it
 * only reflects the LAST device's query result -- the sort below is skipped
 * whenever that one query failed; confirm this is intended */
u = nvml_check(r, "nvmlDeviceGetUtilizationRates");
if(u){
printf(" -> util = %i%%\n", util.gpu);
gpus[i].i = index;
gpus[i].u = util.gpu;
gpus[i].m = util.memory;
}
else{
/* NOTE(review): on a failed query only .i is set; .u and .m stay
 * uninitialized for this entry */
gpus[i].i = i;
}
}
/* sort by idleness so the least-busy devices come first */
if(u){
//printf("not sorted\n");
//for(i = 0; i < devcount; i++)
// printf("gpu[%i] = (i,u,m) ---> (%i, %i, %i)\n", i, gpus[i].i, gpus[i].u, gpus[i].m);
//printf("sorted\n");
/* NOTE(review): element size is sizeof(gpu), not sizeof(gpu_t) -- presumably
 * 'gpu' names the same type; verify against the struct definitions */
qsort(gpus, devcount, sizeof(gpu), compgpu);
//for(i = 0; i < devcount; i++)
// printf("gpu[%i] = (i,u,m) ---> (%i, %i, %i)\n", i, gpus[i].i, gpus[i].u, gpus[i].m);
}
/* malloc info for 'n' GPUs and keep the first (idlest) n entries */
s->gpus = (gpu_t*)malloc(sizeof(gpu_t)*s->ngpus);
printf("\tchosen GPU(s) = {");
for(i = 0; i < s->ngpus; i++){
s->gpus[i] = gpus[i];
printf(" GPU%i", s->gpus[i].i);
}
printf(" }\n");
/* shutdown the nvml library */
r = nvmlShutdown();
nvml_check(r, "nvmlShutdown");
/* free the auxiliary gpu_t array */
free(gpus);
}
/* print parameters */
/* Dump every runtime parameter and the CUDA launch geometry to stdout in a
 * human-readable block; purely informational, no state is modified. */
void printparams(setup_t *s){
printf("\tparameters:{\n");
printf("\t\tL: %i\n", s->L);
printf("\t\tvolume: %i\n", s->N);
printf("\t\t[TR,dT]: [%f, %f]\n", s->TR, s->dT);
printf("\t\t[atrials, ains, apts, ams]: [%i, %i, %i, %i]\n", s->atrials, s->ains, s->apts, s->ams);
printf("\t\tmag_field h: %f\n", s->h);
printf("\t\treplicas: %i\n", s->R);
printf("\t\tptsteps: %i\n", s->pts);
printf("\t\tmzone: %i\n", s->mzone);
printf("\t\tdrop_steps: %i\n", s->ds);
printf("\t\tmcsteps: %i\n", s->ms);
printf("\t\tmeasure: %i\n", s->fs);
printf("\t\tperiod: %i\n", s->period);
printf("\t\tnblocks: %i\n", s->blocks);
printf("\t\trealizations: %i\n", s->realizations);
printf("\t\tseed: %lu\n", s->seed);
printf("\t\tmicrosteps: %i\n", s->cs);
printf("\t\tNGPUS: %i\n\t}\n", s->ngpus);
/* print space of computation */
printf("\tsoc{\n\t\tmcgrid is %i x %i x %i mcblock %i x %i x %i\n\t\tlgrid is %i x %i x %i lblock %i x %i x %i \n\t}\n",
s->mcgrid.x, s->mcgrid.y, s->mcgrid.z, s->mcblock.x, s->mcblock.y, s->mcblock.z,
s->lgrid.x, s->lgrid.y, s->lgrid.z, s->lblock.x, s->lblock.y, s->lblock.z);
}
/* get parameters */
/* Parse the command line into the setup struct. Expects exactly 28 tokens
 * (program name + flags/values as shown in the usage string); exits otherwise.
 * Fixes: integer options were parsed with atof() and silently truncated
 * through double -- they now use atoi(); the seed was parsed with atoi()
 * although it is stored and printed as an unsigned long ("%lu" in
 * printparams), so seeds above INT_MAX were corrupted -- it now uses
 * strtoull(); the lattice-size warning was missing its newline. */
void getparams(setup_t *s, int argc, char **argv){
/* if the number of arguments is not correct, stop the program */
if(argc != 28){
printf("run as:\n./bin/trueke -l <L> <R> -t <T> <dT> -a <tri> <ins> <pts> <ms> -h <h> -s <pts> <mz> <eq> <ms> <meas> <per> -br <b> <r> -z <seed> -g <x>\n");
exit(1);
}
else{
for(int i=0; i<argc; i++){
/* lattice size and number of replicas */
if(strcmp(argv[i],"-l") == 0){
s->L = atoi(argv[i+1]);
s->R = atoi(argv[i+2]);
}
/* get TR and dT (the only genuinely floating-point options besides -h) */
else if(strcmp(argv[i],"-t") == 0){
s->TR = atof(argv[i+1]);
s->dT = atof(argv[i+2]);
}
/* the magnetic field constant */
else if(strcmp(argv[i],"-h") == 0){
s->h = atof(argv[i+1]);
}
/* ptsteps, measure zone, drop steps, mc steps, final steps, period */
else if(strcmp(argv[i],"-s") == 0){
s->pts = atoi(argv[i+1]);
s->mzone = atoi(argv[i+2]);
s->ds = atoi(argv[i+3]);
s->ms = atoi(argv[i+4]);
s->fs = atoi(argv[i+5]);
s->period = atoi(argv[i+6]);
}
/* number of measure blocks and realizations */
else if(strcmp(argv[i],"-br") == 0){
s->blocks = atoi(argv[i+1]);
s->realizations = atoi(argv[i+2]);
}
/* adaptative dt parameters */
else if(strcmp(argv[i], "-a") == 0){
s->atrials = atoi(argv[i+1]);
s->ains = atoi(argv[i+2]);
s->apts = atoi(argv[i+3]);
s->ams = atoi(argv[i+4]);
}
/* number of gpus */
else if(strcmp(argv[i],"-g") == 0){
s->ngpus = atoi(argv[i+1]);
}
/* seed, (pass 0 for /dev/urandom); full unsigned range accepted */
else if(strcmp(argv[i],"-z") == 0){
s->seed = strtoull(argv[i+1], NULL, 10);
}
}
}
/* NOTE: this is a warning only; execution continues with an invalid L */
if( (s->L % 32) != 0 )
fprintf(stderr, "lattice dimensional size must be multiples of 32\n");
}
#endif
|
BKTree.h | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifndef _SPTAG_COMMON_BKTREE_H_
#define _SPTAG_COMMON_BKTREE_H_
#include <stack>
#include <string>
#include <vector>
#include <shared_mutex>
#include "../VectorIndex.h"
#include "CommonUtils.h"
#include "QueryResultSet.h"
#include "WorkSpace.h"
#include "Dataset.h"
#include "DistanceUtils.h"
namespace SPTAG
{
namespace COMMON
{
// node type for storing BKT
// Nodes live in one flat vector (BKTree::m_pTreeRoots); child links are index
// ranges [childStart, childEnd) into that vector rather than pointers. A value
// of -1 means "leaf / not set". BuildTrees negates childStart on nodes whose
// members were all duplicates of the center -- see its numClusters<=1 branch.
struct BKTNode
{
SizeType centerid; // id of the vector acting as this cluster's center (-1 when unset)
SizeType childStart; // index of the first child node (-1 for a leaf)
SizeType childEnd; // one past the last child node (-1 when unset)
BKTNode(SizeType cid = -1) : centerid(cid), childStart(-1), childEnd(-1) {}
};
// Scratch state shared by the k-means helpers below. Buffers are sized for
// _K clusters x _D dimensions; every "new*" buffer holds one private slice
// per worker thread (_T of them) so KmeansAssign can run lock-free, and the
// slices are reduced into slice 0 afterwards.
template <typename T>
struct KmeansArgs {
int _K; // maximum number of clusters (allocation size)
int _DK; // clusters actually used this round (<= _K; set by dynamicK in BuildTrees)
DimensionType _D; // vector dimensionality
int _T; // number of worker threads
DistCalcMethod _M; // distance method used to select fComputeDistance
T* centers; // current centers, _K x _D (aligned allocation)
T* newTCenters; // refined centers produced by RefineCenters, _K x _D
SizeType* counts; // per-cluster member counts (slice 0 after reduction)
float* newCenters; // per-thread coordinate accumulators, _T x _K x _D
SizeType* newCounts; // per-thread member counts, _T x _K
int* label; // cluster id assigned to each index position
SizeType* clusterIdx; // per-thread representative point per cluster, _T x _K
float* clusterDist; // distance of that representative, _T x _K
float* weightedCounts; // per-cluster summed assignment distance
float* newWeightedCounts; // per-thread version of the above, _T x _K
float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length);
// Allocates all buffers; centers use aligned storage for the SIMD distance
// kernels selected by DistanceCalcSelector.
KmeansArgs(int k, DimensionType dim, SizeType datasize, int threadnum, DistCalcMethod distMethod) : _K(k), _DK(k), _D(dim), _T(threadnum), _M(distMethod) {
centers = (T*)_mm_malloc(sizeof(T) * k * dim, ALIGN_SPTAG);
newTCenters = (T*)_mm_malloc(sizeof(T) * k * dim, ALIGN_SPTAG);
counts = new SizeType[k];
newCenters = new float[threadnum * k * dim];
newCounts = new SizeType[threadnum * k];
label = new int[datasize];
clusterIdx = new SizeType[threadnum * k];
clusterDist = new float[threadnum * k];
weightedCounts = new float[k];
newWeightedCounts = new float[threadnum * k];
fComputeDistance = COMMON::DistanceCalcSelector<T>(distMethod);
}
~KmeansArgs() {
_mm_free(centers);
_mm_free(newTCenters);
delete[] counts;
delete[] newCenters;
delete[] newCounts;
delete[] label;
delete[] clusterIdx;
delete[] clusterDist;
delete[] weightedCounts;
delete[] newWeightedCounts;
}
// Zero all per-thread count accumulators before an assignment pass.
inline void ClearCounts() {
memset(newCounts, 0, sizeof(SizeType) * _T * _K);
memset(newWeightedCounts, 0, sizeof(float) * _T * _K);
}
// Zero the per-thread coordinate accumulators (only needed when updateCenters).
inline void ClearCenters() {
memset(newCenters, 0, sizeof(float) * _T * _K * _D);
}
// Reset representative tracking; pass MaxDist to track minima, -MaxDist to
// track maxima (see the two branches in KmeansAssign).
inline void ClearDists(float dist) {
for (int i = 0; i < _T * _K; i++) {
clusterIdx[i] = -1;
clusterDist[i] = dist;
}
}
// Reorder indices[first, last) in place so each cluster's members become a
// contiguous segment (segment sizes taken from the reduced newCounts), then
// move each cluster's representative (clusterIdx[k]) to the end of its segment.
void Shuffle(std::vector<SizeType>& indices, SizeType first, SizeType last) {
// pos[k] = start offset of cluster k's segment (prefix sums of newCounts)
SizeType* pos = new SizeType[_K];
pos[0] = first;
for (int k = 1; k < _K; k++) pos[k] = pos[k - 1] + newCounts[k - 1];
for (int k = 0; k < _K; k++) {
if (newCounts[k] == 0) continue;
SizeType i = pos[k];
// cycle-chase: send the element at i to the current tail of its own
// cluster's segment until an element of cluster k lands at i
while (newCounts[k] > 0) {
SizeType swapid = pos[label[i]] + newCounts[label[i]] - 1;
newCounts[label[i]]--;
std::swap(indices[i], indices[swapid]);
std::swap(label[i], label[swapid]);
}
// place the representative point at the end of cluster k's segment
while (indices[i] != clusterIdx[k]) i++;
std::swap(indices[i], indices[pos[k] + counts[k] - 1]);
}
delete[] pos;
}
};
// Recompute each cluster center as the mean of its members (coordinate sums
// were accumulated into newCenters by KmeansAssign) and write the results to
// newTCenters. Empty clusters are reseeded from the representative point of
// the largest cluster whose representative is measurably away from its own
// center. Returns the total distance the centers moved (convergence signal).
template <typename T>
float RefineCenters(const Dataset<T>& data, KmeansArgs<T>& args)
{
// find the biggest cluster that can donate a point for reseeding
int maxcluster = -1;
SizeType maxCount = 0;
for (int k = 0; k < args._DK; k++) {
if (args.counts[k] > maxCount && args.newCounts[k] > 0 && DistanceUtils::ComputeDistance((T*)data[args.clusterIdx[k]], args.centers + k * args._D, args._D, DistCalcMethod::L2) > 1e-6)
{
maxcluster = k;
maxCount = args.counts[k];
}
}
if (maxcluster != -1 && (args.clusterIdx[maxcluster] < 0 || args.clusterIdx[maxcluster] >= data.R()))
LOG(Helper::LogLevel::LL_Debug, "maxcluster:%d(%d) Error dist:%f\n", maxcluster, args.newCounts[maxcluster], args.clusterDist[maxcluster]);
float diff = 0;
for (int k = 0; k < args._DK; k++) {
T* TCenter = args.newTCenters + k * args._D;
if (args.counts[k] == 0) {
// empty cluster: reseed from the donor's representative, or keep the old center
if (maxcluster != -1) {
//int nextid = Utils::rand_int(last, first);
//while (args.label[nextid] != maxcluster) nextid = Utils::rand_int(last, first);
SizeType nextid = args.clusterIdx[maxcluster];
std::memcpy(TCenter, data[nextid], sizeof(T)*args._D);
}
else {
std::memcpy(TCenter, args.centers + k * args._D, sizeof(T)*args._D);
}
}
else {
// mean of members; cosine mode renormalizes before casting back to T
float* currCenters = args.newCenters + k * args._D;
for (DimensionType j = 0; j < args._D; j++) currCenters[j] /= args.counts[k];
if (args._M == DistCalcMethod::Cosine) {
COMMON::Utils::Normalize(currCenters, args._D, COMMON::Utils::GetBase<T>());
}
for (DimensionType j = 0; j < args._D; j++) TCenter[j] = (T)(currCenters[j]);
}
// accumulate movement between old and new center
diff += args.fComputeDistance(args.centers + k*args._D, TCenter, args._D);
}
return diff;
}
// Assign every point in indices[first, last) to its nearest center, adding a
// balancing penalty lambda * counts[k] to each distance so crowded clusters
// become less attractive. Work is split across args._T OpenMP threads writing
// into private slices of the new* buffers, which are then reduced into slice 0.
// When updateCenters is true, per-cluster coordinate sums are accumulated (for
// RefineCenters) and the FARTHEST member is kept as each cluster's
// representative; otherwise the CLOSEST member is kept. Callers must prime
// clusterDist accordingly via ClearDists(-MaxDist) / ClearDists(MaxDist).
// Returns the summed assignment distance over all points.
template <typename T>
inline float KmeansAssign(const Dataset<T>& data,
std::vector<SizeType>& indices,
const SizeType first, const SizeType last, KmeansArgs<T>& args,
const bool updateCenters, float lambda) {
float currDist = 0;
SizeType subsize = (last - first - 1) / args._T + 1;
#pragma omp parallel for num_threads(args._T) shared(data, indices) reduction(+:currDist)
for (int tid = 0; tid < args._T; tid++)
{
// this thread's contiguous chunk of indices and its private buffer slices
SizeType istart = first + tid * subsize;
SizeType iend = min(first + (tid + 1) * subsize, last);
SizeType *inewCounts = args.newCounts + tid * args._K;
float *inewCenters = args.newCenters + tid * args._K * args._D;
SizeType * iclusterIdx = args.clusterIdx + tid * args._K;
float * iclusterDist = args.clusterDist + tid * args._K;
float * iweightedCounts = args.newWeightedCounts + tid * args._K;
float idist = 0;
for (SizeType i = istart; i < iend; i++) {
int clusterid = 0;
float smallestDist = MaxDist;
for (int k = 0; k < args._DK; k++) {
float dist = args.fComputeDistance(data[indices[i]], args.centers + k*args._D, args._D) + lambda*args.counts[k];
if (dist > -MaxDist && dist < smallestDist) {
clusterid = k; smallestDist = dist;
}
}
args.label[i] = clusterid;
inewCounts[clusterid]++;
iweightedCounts[clusterid] += smallestDist;
idist += smallestDist;
if (updateCenters) {
// accumulate coordinate sums and track the farthest member
const T* v = (const T*)data[indices[i]];
float* center = inewCenters + clusterid*args._D;
for (DimensionType j = 0; j < args._D; j++) center[j] += v[j];
if (smallestDist > iclusterDist[clusterid]) {
iclusterDist[clusterid] = smallestDist;
iclusterIdx[clusterid] = indices[i];
}
}
else {
// track the closest member as the cluster representative
if (smallestDist <= iclusterDist[clusterid]) {
iclusterDist[clusterid] = smallestDist;
iclusterIdx[clusterid] = indices[i];
}
}
}
currDist += idist;
}
// reduce per-thread counts into slice 0
for (int i = 1; i < args._T; i++) {
for (int k = 0; k < args._DK; k++) {
args.newCounts[k] += args.newCounts[i * args._K + k];
args.newWeightedCounts[k] += args.newWeightedCounts[i * args._K + k];
}
}
if (updateCenters) {
// reduce coordinate sums and keep the globally farthest representative
for (int i = 1; i < args._T; i++) {
float* currCenter = args.newCenters + i*args._K*args._D;
for (size_t j = 0; j < ((size_t)args._DK) * args._D; j++) args.newCenters[j] += currCenter[j];
for (int k = 0; k < args._DK; k++) {
if (args.clusterIdx[i*args._K + k] != -1 && args.clusterDist[i*args._K + k] > args.clusterDist[k]) {
args.clusterDist[k] = args.clusterDist[i*args._K + k];
args.clusterIdx[k] = args.clusterIdx[i*args._K + k];
}
}
}
}
else {
// keep the globally closest representative
for (int i = 1; i < args._T; i++) {
for (int k = 0; k < args._DK; k++) {
if (args.clusterIdx[i*args._K + k] != -1 && args.clusterDist[i*args._K + k] <= args.clusterDist[k]) {
args.clusterDist[k] = args.clusterDist[i*args._K + k];
args.clusterIdx[k] = args.clusterIdx[i*args._K + k];
}
}
}
}
return currDist;
}
// Pick initial centers: run tryIters rounds of random seeding, score each on
// the first `samples` points, and keep the best round's centers in
// newTCenters (plus its counts). Also derives an initial balancing lambda
// from the best round's most populated cluster.
// Fix: `lambda` was uninitialized and would be returned as garbage if no
// round ever improved on MaxDist; it now defaults to 0 (no penalty).
template <typename T>
inline float InitCenters(const Dataset<T>& data,
std::vector<SizeType>& indices, const SizeType first, const SizeType last,
KmeansArgs<T>& args, int samples, int tryIters) {
SizeType batchEnd = min(first + samples, last);
float lambda = 0, currDist, minClusterDist = MaxDist;
for (int numKmeans = 0; numKmeans < tryIters; numKmeans++) {
// seed the first _DK centers with randomly chosen data points
for (int k = 0; k < args._DK; k++) {
SizeType randid = COMMON::Utils::rand(last, first);
std::memcpy(args.centers + k*args._D, data[indices[randid]], sizeof(T)*args._D);
}
args.ClearCounts();
args.ClearDists(MaxDist);
currDist = KmeansAssign(data, indices, first, batchEnd, args, false, 0);
if (currDist < minClusterDist) {
// best seeding so far: remember centers, counts and a lambda estimate
minClusterDist = currDist;
memcpy(args.newTCenters, args.centers, sizeof(T)*args._K*args._D);
memcpy(args.counts, args.newCounts, sizeof(SizeType) * args._K);
// lambda: gap between the average and the closest-member distance of
// the biggest cluster, normalized by its size (clamped at 0)
SizeType maxCluster = 0;
for (int k = 1; k < args._DK; k++) if (args.counts[k] > args.counts[maxCluster]) maxCluster = k;
float avgDist = args.newWeightedCounts[maxCluster] / args.counts[maxCluster];
lambda = (avgDist - args.clusterDist[maxCluster]) / args.counts[maxCluster];
if (lambda < 0) lambda = 0;
}
}
return lambda;
}
// Run one bounded, balance-penalized k-means pass over indices[first, last):
// initialize centers, iterate assign/refine on a shuffled sample batch until
// the centers stop moving (or 100 iterations / 5 non-improving rounds), then
// perform a final full-range assignment. Returns the relative standard
// deviation of the cluster sizes (Std/Avg, the balance score used by
// DynamicFactorSelect); returns 0 early when aborted.
// NOTE(review): std::random_shuffle was removed in C++17 -- presumably this
// header builds as C++14 or with extensions; confirm before upgrading.
template <typename T>
float TryClustering(const Dataset<T>& data,
std::vector<SizeType>& indices, const SizeType first, const SizeType last,
KmeansArgs<T>& args, int samples = 1000, float lambdaFactor = 100.0f, bool debug = false, IAbortOperation* abort = nullptr) {
float adjustedLambda = InitCenters(data, indices, first, last, args, samples, 3);
if (abort && abort->ShouldAbort()) return 0;
SizeType batchEnd = min(first + samples, last);
float currDiff, currDist, minClusterDist = MaxDist;
int noImprovement = 0;
// data-independent default penalty, scaled by base^2 and the batch size
float originalLambda = COMMON::Utils::GetBase<T>() * COMMON::Utils::GetBase<T>() / lambdaFactor / (batchEnd - first);
for (int iter = 0; iter < 100; iter++) {
// adopt last round's refined centers, then assign a fresh random batch
std::memcpy(args.centers, args.newTCenters, sizeof(T)*args._K*args._D);
std::random_shuffle(indices.begin() + first, indices.begin() + last);
args.ClearCenters();
args.ClearCounts();
args.ClearDists(-MaxDist);
currDist = KmeansAssign(data, indices, first, batchEnd, args, true, min(adjustedLambda, originalLambda));
std::memcpy(args.counts, args.newCounts, sizeof(SizeType) * args._K);
if (currDist < minClusterDist) {
noImprovement = 0;
minClusterDist = currDist;
}
else {
noImprovement++;
}
currDiff = RefineCenters(data, args);
//if (debug) LOG(Helper::LogLevel::LL_Info, "iter %d dist:%f diff:%f\n", iter, currDist, currDiff);
if (abort && abort->ShouldAbort()) return 0;
if (currDiff < 1e-3 || noImprovement >= 5) break;
}
// final assignment over the FULL range with no balancing penalty
args.ClearCounts();
args.ClearDists(MaxDist);
currDist = KmeansAssign(data, indices, first, last, args, false, 0);
std::memcpy(args.counts, args.newCounts, sizeof(SizeType) * args._K);
// balance statistics over the clusters actually used (_DK)
SizeType maxCount = 0, minCount = (std::numeric_limits<SizeType>::max)(), availableClusters = 0;
float CountStd = 0.0, CountAvg = (last - first) * 1.0f / args._DK;
for (int i = 0; i < args._DK; i++) {
if (args.counts[i] > maxCount) maxCount = args.counts[i];
if (args.counts[i] < minCount) minCount = args.counts[i];
CountStd += (args.counts[i] - CountAvg) * (args.counts[i] - CountAvg);
if (args.counts[i] > 0) availableClusters++;
}
CountStd = sqrt(CountStd / args._DK) / CountAvg;
if (debug) LOG(Helper::LogLevel::LL_Info, "Lambda:min(%g,%g) Max:%d Min:%d Avg:%f Std/Avg:%f Dist:%f NonZero/Total:%d/%d\n", originalLambda, adjustedLambda, maxCount, minCount, CountAvg, CountStd, currDist, availableClusters, args._DK);
return CountStd;
}
// Pick the balancing lambda factor that yields the most even cluster sizes.
// Sweeps seven decades (0.001 .. 1000), scoring each candidate by the
// relative count standard deviation reported by TryClustering, and returns
// the factor with the lowest score.
template <typename T>
float DynamicFactorSelect(const Dataset<T> & data,
std::vector<SizeType> & indices, const SizeType first, const SizeType last,
KmeansArgs<T> & args, int samples = 1000) {
float bestFactor = 100.0f;
float bestStd = (std::numeric_limits<float>::max)();
for (float factor = 0.001f; factor <= 1000.0f + 1e-3; factor *= 10) {
const float countStd = TryClustering(data, indices, first, last, args, samples, factor, true);
if (countStd < bestStd) {
bestStd = countStd;
bestFactor = factor;
}
}
/*
// Optional finer, second-stage sweep around the winner (kept for reference):
std::vector<float> tries(16, 0);
for (int i = 0; i < 8; i++) {
tries[i] = bestFactor * (i + 2) / 10;
tries[8 + i] = bestFactor * (i + 2);
}
for (float factor : tries) {
float countStd = TryClustering(data, indices, first, last, args, samples, factor, true);
if (countStd < bestStd) {
bestFactor = factor;
bestStd = countStd;
}
}
*/
LOG(Helper::LogLevel::LL_Info, "Best Lambda Factor:%f\n", bestFactor);
return bestFactor;
}
// Cluster indices[first, last) into up to args._K groups and regroup the
// index range so each cluster's members are contiguous (via args.Shuffle).
// Returns the number of non-empty clusters; returns 1 without shuffling when
// clustering degenerates to a single group, or when aborted mid-run.
template <typename T>
int KmeansClustering(const Dataset<T>& data,
std::vector<SizeType>& indices, const SizeType first, const SizeType last,
KmeansArgs<T>& args, int samples = 1000, float lambdaFactor = 100.0f, bool debug = false, IAbortOperation* abort = nullptr) {
TryClustering(data, indices, first, last, args, samples, lambdaFactor, debug, abort);
if (abort && abort->ShouldAbort()) return 1;
// count clusters that received at least one member
int nonEmpty = 0;
for (int k = 0; k < args._K; k++) {
if (args.counts[k] > 0) ++nonEmpty;
}
// nothing to partition with 0 or 1 usable clusters
if (nonEmpty <= 1) return nonEmpty;
// make each cluster a contiguous segment of the index range
args.Shuffle(indices, first, last);
return nonEmpty;
}
class BKTree
{
public:
BKTree(): m_iTreeNumber(1), m_iBKTKmeansK(32), m_iBKTLeafSize(8), m_iSamples(1000), m_fBalanceFactor(-1.0f), m_lock(new std::shared_timed_mutex) {}
BKTree(const BKTree& other): m_iTreeNumber(other.m_iTreeNumber),
m_iBKTKmeansK(other.m_iBKTKmeansK),
m_iBKTLeafSize(other.m_iBKTLeafSize),
m_iSamples(other.m_iSamples),
m_fBalanceFactor(other.m_fBalanceFactor),
m_lock(new std::shared_timed_mutex) {}
~BKTree() {}
inline const BKTNode& operator[](SizeType index) const { return m_pTreeRoots[index]; }
inline BKTNode& operator[](SizeType index) { return m_pTreeRoots[index]; }
inline SizeType size() const { return (SizeType)m_pTreeRoots.size(); }
inline SizeType sizePerTree() const {
std::shared_lock<std::shared_timed_mutex> lock(*m_lock);
return (SizeType)m_pTreeRoots.size() - m_pTreeStart.back();
}
inline const std::unordered_map<SizeType, SizeType>& GetSampleMap() const { return m_pSampleCenterMap; }
template <typename T>
void Rebuild(const Dataset<T>& data, DistCalcMethod distMethod, IAbortOperation* abort)
{
BKTree newTrees(*this);
newTrees.BuildTrees<T>(data, distMethod, 1, nullptr, nullptr, false, abort);
std::unique_lock<std::shared_timed_mutex> lock(*m_lock);
m_pTreeRoots.swap(newTrees.m_pTreeRoots);
m_pTreeStart.swap(newTrees.m_pTreeStart);
m_pSampleCenterMap.swap(newTrees.m_pSampleCenterMap);
}
// Build m_iTreeNumber balanced k-means trees (BKTs) over the vectors in
// `data`, appending nodes to m_pTreeRoots and recording each tree's root
// offset in m_pTreeStart.
//
// Parameters:
//   data           dataset of data.R() vectors, each of data.C() dimensions.
//   distMethod     distance measure used by the k-means clustering.
//   numOfThreads   thread count handed to KmeansArgs.
//   indices        optional subset of row ids to index; when null, all rows
//                  [0, data.R()) are indexed.
//   reverseIndices optional id remap applied when emitting leaf/center ids
//                  (localindices[j] -> reverseIndices->at(localindices[j])).
//   dynamicK       when true, shrink the k-means fanout for small partitions.
//   abort          optional cooperative-cancel hook; checked once per pop.
template <typename T>
void BuildTrees(const Dataset<T>& data, DistCalcMethod distMethod, int numOfThreads,
std::vector<SizeType>* indices = nullptr, std::vector<SizeType>* reverseIndices = nullptr,
bool dynamicK = false, IAbortOperation* abort = nullptr)
{
// Work item: tree node `index` owns the range [first, last) of localindices.
struct BKTStackItem {
SizeType index, first, last;
bool debug;
BKTStackItem(SizeType index_, SizeType first_, SizeType last_, bool debug_ = false) : index(index_), first(first_), last(last_), debug(debug_) {}
};
std::stack<BKTStackItem> ss;
std::vector<SizeType> localindices;
if (indices == nullptr) {
localindices.resize(data.R());
for (SizeType i = 0; i < localindices.size(); i++) localindices[i] = i;
}
else {
localindices.assign(indices->begin(), indices->end());
}
KmeansArgs<T> args(m_iBKTKmeansK, data.C(), (SizeType)localindices.size(), numOfThreads, distMethod);
// Pick a balance factor once, lazily, if the caller left it unset (< 0).
if (m_fBalanceFactor < 0) m_fBalanceFactor = DynamicFactorSelect(data, localindices, 0, (SizeType)localindices.size(), args, m_iSamples);
m_pSampleCenterMap.clear();
for (char i = 0; i < m_iTreeNumber; i++)
{
// NOTE(review): std::random_shuffle was removed in C++17; if this file is
// built as C++17 or later it must migrate to std::shuffle — confirm the
// project's language standard.
std::random_shuffle(localindices.begin(), localindices.end());
m_pTreeStart.push_back((SizeType)m_pTreeRoots.size());
m_pTreeRoots.emplace_back((SizeType)localindices.size());
LOG(Helper::LogLevel::LL_Info, "Start to build BKTree %d\n", i + 1);
ss.push(BKTStackItem(m_pTreeStart[i], 0, (SizeType)localindices.size(), true));
while (!ss.empty()) {
if (abort && abort->ShouldAbort()) return;
BKTStackItem item = ss.top(); ss.pop();
// Children of this node are appended starting at newBKTid.
SizeType newBKTid = (SizeType)m_pTreeRoots.size();
m_pTreeRoots[item.index].childStart = newBKTid;
if (item.last - item.first <= m_iBKTLeafSize) {
// Small range: emit every id directly as a leaf child.
for (SizeType j = item.first; j < item.last; j++) {
SizeType cid = (reverseIndices == nullptr)? localindices[j]: reverseIndices->at(localindices[j]);
m_pTreeRoots.emplace_back(cid);
}
}
else { // clustering the data into BKTKmeansK clusters
if (dynamicK) {
// Shrink k toward 2 for small ranges so clusters stay non-trivial.
args._DK = std::min<int>((item.last - item.first) / m_iBKTLeafSize + 1, m_iBKTKmeansK);
args._DK = std::max<int>(args._DK, 2);
}
int numClusters = KmeansClustering(data, localindices, item.first, item.last, args, m_iSamples, m_fBalanceFactor, item.debug, abort);
if (numClusters <= 1) {
// Degenerate clustering: collapse the whole range under one center and
// record each member's center in m_pSampleCenterMap.
SizeType end = min(item.last + 1, (SizeType)localindices.size());
std::sort(localindices.begin() + item.first, localindices.begin() + end);
m_pTreeRoots[item.index].centerid = (reverseIndices == nullptr) ? localindices[item.first] : reverseIndices->at(localindices[item.first]);
// A negative childStart marks this node as collapsed/leaf-like; search
// code tests childStart < 0 (see SearchTrees/InitSearchTrees).
m_pTreeRoots[item.index].childStart = -m_pTreeRoots[item.index].childStart;
for (SizeType j = item.first + 1; j < end; j++) {
SizeType cid = (reverseIndices == nullptr) ? localindices[j] : reverseIndices->at(localindices[j]);
m_pTreeRoots.emplace_back(cid);
m_pSampleCenterMap[cid] = m_pTreeRoots[item.index].centerid;
}
m_pSampleCenterMap[-1 - m_pTreeRoots[item.index].centerid] = item.index;
}
else {
// One child per non-empty cluster; recurse on clusters larger than one.
SizeType maxCount = 0;
for (int k = 0; k < m_iBKTKmeansK; k++) if (args.counts[k] > maxCount) maxCount = args.counts[k];
for (int k = 0; k < m_iBKTKmeansK; k++) {
if (args.counts[k] == 0) continue;
// The last element of each cluster's range serves as the child's center id.
SizeType cid = (reverseIndices == nullptr) ? localindices[item.first + args.counts[k] - 1] : reverseIndices->at(localindices[item.first + args.counts[k] - 1]);
m_pTreeRoots.emplace_back(cid);
// Debug tracing is propagated only to the largest cluster.
if (args.counts[k] > 1) ss.push(BKTStackItem(newBKTid++, item.first, item.first + args.counts[k] - 1, item.debug && (args.counts[k] == maxCount)));
item.first += args.counts[k];
}
}
}
m_pTreeRoots[item.index].childEnd = (SizeType)m_pTreeRoots.size();
}
// Sentinel node terminating this tree's node list (see LoadTrees checks).
m_pTreeRoots.emplace_back(-1);
LOG(Helper::LogLevel::LL_Info, "%d BKTree built, %zu %zu\n", i + 1, m_pTreeRoots.size() - m_pTreeStart[i], localindices.size());
}
}
// Number of bytes SaveTrees will write: the tree count, the per-tree start
// offsets, the node count, and the flat node array.
inline std::uint64_t BufferSize() const
{
    std::uint64_t bytes = sizeof(int);              // m_iTreeNumber
    bytes += sizeof(SizeType) * m_iTreeNumber;      // m_pTreeStart entries
    bytes += sizeof(SizeType);                      // node count
    bytes += sizeof(BKTNode) * m_pTreeRoots.size(); // node array
    return bytes;
}
// Serialize all trees to p_out under a shared (reader) lock.
// On-disk layout (matches LoadTrees):
//   [int m_iTreeNumber][SizeType m_pTreeStart[m_iTreeNumber]]
//   [SizeType nodeCount][BKTNode nodes[nodeCount]]
// Returns ErrorCode::Success; I/O failures are reported by the IOBINARY macro.
ErrorCode SaveTrees(std::shared_ptr<Helper::DiskPriorityIO> p_out) const
{
std::shared_lock<std::shared_timed_mutex> lock(*m_lock);
IOBINARY(p_out, WriteBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber);
IOBINARY(p_out, WriteBinary, sizeof(SizeType) * m_iTreeNumber, (char*)m_pTreeStart.data());
SizeType treeNodeSize = (SizeType)m_pTreeRoots.size();
IOBINARY(p_out, WriteBinary, sizeof(treeNodeSize), (char*)&treeNodeSize);
IOBINARY(p_out, WriteBinary, sizeof(BKTNode) * treeNodeSize, (char*)m_pTreeRoots.data());
LOG(Helper::LogLevel::LL_Info, "Save BKT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
return ErrorCode::Success;
}
// Persist the trees to the file sTreeFileName (binary, created/truncated).
// Returns FailedCreateFile when the output stream cannot be opened.
ErrorCode SaveTrees(std::string sTreeFileName) const
{
    LOG(Helper::LogLevel::LL_Info, "Save BKT to %s\n", sTreeFileName.c_str());
    auto out = f_createIO();
    if (out == nullptr) return ErrorCode::FailedCreateFile;
    if (!out->Initialize(sTreeFileName.c_str(), std::ios::binary | std::ios::out)) return ErrorCode::FailedCreateFile;
    return SaveTrees(out);
}
// Deserialize trees from an in-memory image (same layout as SaveTrees).
// NOTE(review): no buffer length is available here, so a truncated or
// corrupt image cannot be detected — callers must pass a complete image.
ErrorCode LoadTrees(char* pBKTMemFile)
{
m_iTreeNumber = *((int*)pBKTMemFile);
pBKTMemFile += sizeof(int);
m_pTreeStart.resize(m_iTreeNumber);
memcpy(m_pTreeStart.data(), pBKTMemFile, sizeof(SizeType) * m_iTreeNumber);
pBKTMemFile += sizeof(SizeType)*m_iTreeNumber;
SizeType treeNodeSize = *((SizeType*)pBKTMemFile);
pBKTMemFile += sizeof(SizeType);
m_pTreeRoots.resize(treeNodeSize);
memcpy(m_pTreeRoots.data(), pBKTMemFile, sizeof(BKTNode) * treeNodeSize);
// Ensure the node list ends with the -1 sentinel expected by traversal.
if (m_pTreeRoots.size() > 0 && m_pTreeRoots.back().centerid != -1) m_pTreeRoots.emplace_back(-1);
LOG(Helper::LogLevel::LL_Info, "Load BKT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
return ErrorCode::Success;
}
// Deserialize trees from an I/O stream (same layout as SaveTrees).
// Read failures are reported by the IOBINARY macro.
ErrorCode LoadTrees(std::shared_ptr<Helper::DiskPriorityIO> p_input)
{
IOBINARY(p_input, ReadBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber);
m_pTreeStart.resize(m_iTreeNumber);
IOBINARY(p_input, ReadBinary, sizeof(SizeType) * m_iTreeNumber, (char*)m_pTreeStart.data());
SizeType treeNodeSize;
IOBINARY(p_input, ReadBinary, sizeof(treeNodeSize), (char*)&treeNodeSize);
m_pTreeRoots.resize(treeNodeSize);
IOBINARY(p_input, ReadBinary, sizeof(BKTNode) * treeNodeSize, (char*)m_pTreeRoots.data());
// Ensure the node list ends with the -1 sentinel expected by traversal.
if (m_pTreeRoots.size() > 0 && m_pTreeRoots.back().centerid != -1) m_pTreeRoots.emplace_back(-1);
LOG(Helper::LogLevel::LL_Info, "Load BKT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
return ErrorCode::Success;
}
// Load the trees from the file sTreeFileName (binary).
// Returns FailedOpenFile when the input stream cannot be opened.
ErrorCode LoadTrees(std::string sTreeFileName)
{
    LOG(Helper::LogLevel::LL_Info, "Load BKT From %s\n", sTreeFileName.c_str());
    auto in = f_createIO();
    if (in == nullptr) return ErrorCode::FailedOpenFile;
    if (!in->Initialize(sTreeFileName.c_str(), std::ios::binary | std::ios::in)) return ErrorCode::FailedOpenFile;
    return LoadTrees(in);
}
// Seed the tree-search priority queue (p_space.m_SPTQueue) with each tree's
// root: the root itself when it is a collapsed node (childStart < 0),
// otherwise all of its direct children, each scored by its center's
// distance to the query vector.
template <typename T>
void InitSearchTrees(const Dataset<T>& data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space) const
{
for (char i = 0; i < m_iTreeNumber; i++) {
const BKTNode& node = m_pTreeRoots[m_pTreeStart[i]];
if (node.childStart < 0) {
p_space.m_SPTQueue.insert(NodeDistPair(m_pTreeStart[i], fComputeDistance(p_query.GetTarget(), data[node.centerid], data.C())));
}
else {
for (SizeType begin = node.childStart; begin < node.childEnd; begin++) {
SizeType index = m_pTreeRoots[begin].centerid;
p_space.m_SPTQueue.insert(NodeDistPair(begin, fComputeDistance(p_query.GetTarget(), data[index], data.C())));
}
}
}
}
// Best-first traversal of the BKTs.  Repeatedly pops the closest tree node
// from p_space.m_SPTQueue: collapsed/leaf nodes (childStart < 0) feed their
// center into the nearest-neighbor queue (m_NGQueue) until p_limits leaves
// have been checked; internal nodes also emit their own center and push
// their children scored by distance to the query.  CheckAndSet deduplicates
// vertices already seen in this search.
template <typename T>
void SearchTrees(const Dataset<T>& data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), const COMMON::QueryResultSet<T> &p_query,
COMMON::WorkSpace &p_space, const int p_limits) const
{
while (!p_space.m_SPTQueue.empty())
{
NodeDistPair bcell = p_space.m_SPTQueue.pop();
const BKTNode& tnode = m_pTreeRoots[bcell.node];
if (tnode.childStart < 0) {
if (!p_space.CheckAndSet(tnode.centerid)) {
p_space.m_iNumberOfCheckedLeaves++;
p_space.m_NGQueue.insert(NodeDistPair(tnode.centerid, bcell.distance));
}
// Stop once the leaf budget for this query is exhausted.
if (p_space.m_iNumberOfCheckedLeaves >= p_limits) break;
}
else {
if (!p_space.CheckAndSet(tnode.centerid)) {
p_space.m_NGQueue.insert(NodeDistPair(tnode.centerid, bcell.distance));
}
for (SizeType begin = tnode.childStart; begin < tnode.childEnd; begin++) {
SizeType index = m_pTreeRoots[begin].centerid;
p_space.m_SPTQueue.insert(NodeDistPair(begin, fComputeDistance(p_query.GetTarget(), data[index], data.C())));
}
}
}
}
private:
std::vector<SizeType> m_pTreeStart;
std::vector<BKTNode> m_pTreeRoots;
std::unordered_map<SizeType, SizeType> m_pSampleCenterMap;
public:
std::unique_ptr<std::shared_timed_mutex> m_lock;
int m_iTreeNumber, m_iBKTKmeansK, m_iBKTLeafSize, m_iSamples;
float m_fBalanceFactor;
};
}
}
#endif
|
GB_binop__bget_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bget_uint16
// A.*B function (eWiseMult): GB_AemultB__bget_uint16
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bget_uint16
// C+=b function (dense accum): GB_Cdense_accumb__bget_uint16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bget_uint16
// C=scalar+B GB_bind1st__bget_uint16
// C=scalar+B' GB_bind1st_tran__bget_uint16
// C=A+scalar GB_bind2nd__bget_uint16
// C=A'+scalar GB_bind2nd_tran__bget_uint16
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = GB_BITGET (aij, bij, uint16_t, 16)
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_BITGET (x, y, uint16_t, 16) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BGET || GxB_NO_UINT16 || GxB_NO_BGET_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense.  The loop body lives in the
// included template, specialized by the GB_* macros defined above; returns
// GrB_NO_VALUE when this operator/type kernel is compiled out.
GrB_Info GB_Cdense_ewise3_noaccum__bget_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C.
// kfirst_slice/klast_slice/pstart_slice describe how B has been sliced
// into `ntasks` parallel tasks; the work happens in the included template.
GrB_Info GB_Cdense_accumB__bget_uint16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.  p_bwork points to the
// scalar (type uint16_t); the included template performs the update.
// Returns GrB_NO_VALUE when this kernel is compiled out.
GrB_Info GB_Cdense_accumb__bget_uint16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// (an unreachable duplicate `return (GrB_SUCCESS) ;` that followed the
// block above has been removed)
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// The colscale (C = A*D) kernel is not generated for the BGET operator;
// "(none)" is the generator's placeholder name and the whole reference
// body is compiled out via #if 0.
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// The rowscale (C = D*B) kernel is not generated for the BGET operator.
// "(none)" is the generator's placeholder for an omitted function name
// (was misspelled "(node)", inconsistent with the colscale placeholder);
// the body is compiled out via #if 0.
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *GB_RESTRICT Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B; the result pattern is handled by the
// included template.  TaskList/ntasks describe the parallel partition;
// the C_to_* arrays map C's vectors back into M, A, and B.
GrB_Info GB_AaddB__bget_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B; the loop lives in the included
// template.  TaskList/ntasks describe the parallel partition; the C_to_*
// arrays map C's vectors back into M, A, and B.
GrB_Info GB_AemultB__bget_uint16
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitget (x, Bx [p]) for all p in [0, anz): apply the BGET
// operator with the scalar bound as the first operand.
GrB_Info GB_bind1st__bget_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    uint16_t x = (*((uint16_t *) x_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t b = Bx [p] ;
        Cx [p] = GB_BITGET (x, b, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitget (Ax [p], y) for all p in [0, anz): apply the BGET
// operator with the scalar bound as the second operand.
GrB_Info GB_bind2nd__bget_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
#if GB_DISABLE
    return (GrB_NO_VALUE) ;
#else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t y = (*((uint16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t a = Ax [p] ;
        Cx [p] = GB_BITGET (a, y, uint16_t, 16) ;
    }
    return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_BITGET (x, aij, uint16_t, 16) ; \
}
// C = op (x, A'): transpose A while applying the operator with the scalar
// bound to the first argument.  The traversal lives in GB_unop_transpose.c
// and is driven by the GB_CAST_OP macro defined just above.
GrB_Info GB_bind1st_tran__bget_uint16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for any code following this function.
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = Ax [pA] ; \
Cx [pC] = GB_BITGET (aij, y, uint16_t, 16) ; \
}
// C = op (A', y): transpose A while applying the operator with the scalar
// bound to the second argument; driven by the GB_CAST_OP macro above.
GrB_Info GB_bind2nd_tran__bget_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
graph.h | // copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <stdio.h>
#include <cinttypes>
#include <iostream>
#include <type_traits>
#include <map>
#include "pvector.h"
#include "util.h"
#include "segmentgraph.h"
#include <memory>
#include <assert.h>
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
// Used to hold node & weight, with another node it makes a weighted edge
// Holds a destination node id and an edge weight; paired with a source
// node it forms a weighted edge.
template <typename NodeID_=int32_t, typename WeightT_=int32_t>
struct NodeWeight {
  NodeID_ v;    // destination node id
  WeightT_ w;   // edge weight
  NodeWeight() {}   // intentionally uninitialized (bulk-allocated in builders)
  NodeWeight(NodeID_ v) : v(v), w(1) {}   // implicit on purpose: node -> unit-weight edge
  NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}
  // Order by node id, breaking ties by weight.
  bool operator< (const NodeWeight& rhs) const {
    return v == rhs.v ? w < rhs.w : v < rhs.v;
  }
  // doesn't check WeightT_s, needed to remove duplicate edges
  bool operator== (const NodeWeight& rhs) const {
    return v == rhs.v;
  }
  // doesn't check WeightT_s, needed to remove self edges
  bool operator== (const NodeID_& rhs) const {
    return v == rhs;
  }
  // Implicit conversion to the node id; now const-qualified so it also
  // works through const references (e.g. static_cast on const elements).
  operator NodeID_() const {
    return v;
  }
};
// Write a NodeWeight as "<node> <weight>".
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
                         const NodeWeight<NodeID_, WeightT_>& nw) {
  os << nw.v;
  os << " ";
  os << nw.w;
  return os;
}
// Read a NodeWeight as "<node> <weight>".
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
  is >> nw.v;
  is >> nw.w;
  return is;
}
// Syntactic sugar for an edge
// A (source, destination) pair; endpoint types may differ.
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
  SrcT u;   // source endpoint
  DstT v;   // destination endpoint
  EdgePair() {}   // members deliberately left uninitialized
  EdgePair(SrcT src, DstT dst) : u(src), v(dst) {}
};
// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;
typedef EdgePair<SGID> SGEdge;
typedef int64_t SGOffset;
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
// Used to access neighbors of vertex, basically sugar for iterators
class Neighborhood {
NodeID_ n_;
DestID_** g_index_;
public:
Neighborhood(NodeID_ n, DestID_** g_index) : n_(n), g_index_(g_index) {}
typedef DestID_* iterator;
iterator begin() { return g_index_[n_]; }
iterator end() { return g_index_[n_+1]; }
};
void ReleaseResources() {
//added a second condition to prevent double free (transpose graphs)
/*
if (out_index_ != nullptr)
delete[] out_index_;
if (out_neighbors_ != nullptr)
delete[] out_neighbors_;
if (directed_) {
if (in_index_ != nullptr && in_index_ != out_index_)
delete[] in_index_;
if (in_neighbors_ != nullptr && in_neighbors_ != out_neighbors_)
delete[] in_neighbors_;
}
if (flags_ != nullptr)
delete[] flags_;
*/
out_index_shared_.reset();
out_neighbors_shared_.reset();
in_index_shared_.reset();
in_neighbors_shared_.reset();
flags_shared_.reset();
offsets_shared_.reset();
for (auto iter = label_to_segment.begin(); iter != label_to_segment.end(); iter++) {
delete ((*iter).second);
}
}
public:
#ifndef IGNORE_JULIENNE_TYPES
julienne::graph<julienne::symmetricVertex> julienne_graph = __julienne_null_graph;
//julienne::EdgeMap<julienne::uintE, julienne::symmetricVertex> *em;
#endif
CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
out_index_(nullptr), out_neighbors_(nullptr),
in_index_(nullptr), in_neighbors_(nullptr), flags_(nullptr), is_transpose_(false) {}
CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
directed_(false), num_nodes_(num_nodes),
out_index_(index), out_neighbors_(neighs),
in_index_(index), in_neighbors_(neighs){
out_index_shared_.reset(index);
out_neighbors_shared_.reset(neighs);
in_index_shared_ = out_index_shared_;
in_neighbors_shared_ = out_neighbors_shared_;
num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
//adding flags used for deduplication
flags_ = new int[num_nodes_];
flags_shared_.reset(flags_);
//adding offsets for load balacne scheme
SetUpOffsets(true);
//Set this up for getting random neighbors
srand(time(NULL));
}
CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index), out_neighbors_(out_neighs),
in_index_(in_index), in_neighbors_(in_neighs), is_transpose_(false){
num_edges_ = out_index_[num_nodes_] - out_index_[0];
out_index_shared_.reset(out_index);
out_neighbors_shared_.reset(out_neighs);
in_index_shared_.reset(in_index);
in_neighbors_shared_.reset(in_neighs);
flags_ = new int[num_nodes_];
flags_shared_.reset(flags_);
SetUpOffsets(true);
//Set this up for getting random neighbors
srand(time(NULL));
}
CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs, bool is_transpose) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index), out_neighbors_(out_neighs),
in_index_(in_index), in_neighbors_(in_neighs) , is_transpose_(is_transpose){
num_edges_ = out_index_[num_nodes_] - out_index_[0];
out_index_shared_.reset(out_index);
out_neighbors_shared_.reset(out_neighs);
in_index_shared_.reset(in_index);
in_neighbors_shared_.reset(in_neighs);
flags_ = new int[num_nodes_];
flags_shared_.reset(flags_);
SetUpOffsets(true);
//Set this up for getting random neighbors
srand(time(NULL));
}
CSRGraph(int64_t num_nodes, std::shared_ptr<DestID_*> out_index, std::shared_ptr<DestID_> out_neighs,
shared_ptr<DestID_*> in_index, shared_ptr<DestID_> in_neighs, bool is_transpose) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index.get()), out_neighbors_(out_neighs.get()),
in_index_(in_index.get()), in_neighbors_(in_neighs.get()) , is_transpose_(is_transpose){
num_edges_ = out_index_[num_nodes_] - out_index_[0];
out_index_shared_ = (out_index);
out_neighbors_shared_ = (out_neighs);
in_index_shared_ = (in_index);
in_neighbors_shared_ = (in_neighs);
flags_ = new int[num_nodes_];
flags_shared_.reset(flags_);
SetUpOffsets(true);
//Set this up for getting random neighbors
srand(time(NULL));
}
CSRGraph(CSRGraph& other) : directed_(other.directed_),
num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
in_index_(other.in_index_), in_neighbors_(other.in_neighbors_), is_transpose_(false){
/* Commenting this because object is not taking owner ship of the elements, notice destructor_free is set to false
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
other.flags_ = nullptr;
other.offsets_ = nullptr;
*/
out_index_shared_ = other.out_index_shared_;
out_neighbors_shared_ = other.out_neighbors_shared_;
in_index_shared_ = other.in_index_shared_;
in_neighbors_shared_ = other.in_neighbors_shared_;
//Set this up for getting random neighbors
srand(time(NULL));
}
CSRGraph(CSRGraph&& other) : directed_(other.directed_),
num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
in_index_(other.in_index_), in_neighbors_(other.in_neighbors_), is_transpose_(false){
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
other.flags_ = nullptr;
other.offsets_ = nullptr;
out_index_shared_ = other.out_index_shared_;
out_neighbors_shared_ = other.out_neighbors_shared_;
in_index_shared_ = other.in_index_shared_;
in_neighbors_shared_ = other.in_neighbors_shared_;
other.out_index_shared_.reset();
other.out_neighbors_shared_.reset();
other.in_index_shared_.reset();
other.in_neighbors_shared_.reset();
other.flags_shared_.reset();
other.offsets_shared_.reset();
//Set this up for getting random neighbors
srand(time(NULL));
}
~CSRGraph() {
if (!is_transpose_)
ReleaseResources();
}
CSRGraph& operator=(CSRGraph& other) {
if (this != &other) {
if (!is_transpose_)
ReleaseResources();
directed_ = other.directed_;
num_edges_ = other.num_edges_;
num_nodes_ = other.num_nodes_;
out_index_ = other.out_index_;
out_neighbors_ = other.out_neighbors_;
in_index_ = other.in_index_;
in_neighbors_ = other.in_neighbors_;
out_index_shared_ = other.out_index_shared_;
out_neighbors_shared_ = other.out_neighbors_shared_;
in_index_shared_ = other.in_index_shared_;
in_neighbors_shared_ = other.in_neighbors_shared_;
//need the following, otherwise would get double free errors
/*
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
other.flags_ = nullptr;
other.offsets_ = nullptr;
*/
}
return *this;
}
CSRGraph& operator=(CSRGraph&& other) {
if (this != &other) {
if (!is_transpose_ )
ReleaseResources();
directed_ = other.directed_;
num_edges_ = other.num_edges_;
num_nodes_ = other.num_nodes_;
out_index_ = other.out_index_;
out_neighbors_ = other.out_neighbors_;
in_index_ = other.in_index_;
in_neighbors_ = other.in_neighbors_;
out_index_shared_ = other.out_index_shared_;
out_neighbors_shared_ = other.out_neighbors_shared_;
in_index_shared_ = other.in_index_shared_;
in_neighbors_shared_ = other.in_neighbors_shared_;
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
other.flags_ = nullptr;
other.offsets_ = nullptr;
other.out_index_shared_.reset();
other.out_neighbors_shared_.reset();
other.in_index_shared_.reset();
other.in_neighbors_shared_.reset();
other.flags_shared_.reset();
other.offsets_shared_.reset();
}
return *this;
}
bool directed() const {
return directed_;
}
int64_t num_nodes() const {
return num_nodes_;
}
int64_t num_edges() const {
return num_edges_;
}
int64_t num_edges_directed() const {
return directed_ ? num_edges_ : 2*num_edges_;
}
int64_t out_degree(NodeID_ v) const {
return out_index_[v+1] - out_index_[v];
}
int64_t in_degree(NodeID_ v) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return in_index_[v+1] - in_index_[v];
}
Neighborhood out_neigh(NodeID_ n) const {
return Neighborhood(n, out_index_);
}
Neighborhood in_neigh(NodeID_ n) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return Neighborhood(n, in_index_);
}
NodeID_ get_random_out_neigh(NodeID_ n) {
int num_nghs = out_degree(n);
assert(num_nghs!=0);
int rand_index = rand() % num_nghs;
return out_index_[n][rand_index];
}
NodeID_ get_random_in_neigh(NodeID_ n) {
int num_nghs = in_degree(n);
assert(num_nghs!=0);
int rand_index = rand() % num_nghs;
return in_index_[n][rand_index];
}
void PrintStats() const {
std::cout << "Graph has " << num_nodes_ << " nodes and "
<< num_edges_ << " ";
if (!directed_)
std::cout << "un";
std::cout << "directed edges for degree: ";
std::cout << num_edges_/num_nodes_ << std::endl;
}
void PrintTopology() const {
for (NodeID_ i=0; i < num_nodes_; i++) {
std::cout << i << ": ";
for (DestID_ j : out_neigh(i)) {
std::cout << j << " ";
}
std::cout << std::endl;
}
}
static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
NodeID_ length = offsets.size();
DestID_** index = new DestID_*[length];
#pragma omp parallel for
for (NodeID_ n=0; n < length; n++)
index[n] = neighs + offsets[n];
return index;
}
pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
pvector<SGOffset> offsets(num_nodes_+1);
for (NodeID_ n=0; n < num_nodes_+1; n++)
if (in_graph)
offsets[n] = in_index_[n] - in_index_[0];
else
offsets[n] = out_index_[n] - out_index_[0];
return offsets;
}
void SetUpOffsets(bool in_graph = false) {
offsets_ = new SGOffset[num_nodes_+1];
offsets_shared_.reset(offsets_);
for (NodeID_ n=0; n < num_nodes_+1; n++)
if (in_graph)
offsets_[n] = in_index_[n] - in_index_[0];
else
offsets_[n] = out_index_[n] - out_index_[0];
}
Range<NodeID_> vertices() const {
return Range<NodeID_>(num_nodes());
}
SegmentedGraph<DestID_, NodeID_>* getSegmentedGraph(std::string label, int id) {
return label_to_segment[label]->getSegmentedGraph(id);
}
int getNumSegments(std::string label) {
return label_to_segment[label]->numSegments;
}
void buildPullSegmentedGraphs(std::string label, int numSegments, bool numa_aware=false, std::string path="") {
auto graphSegments = new GraphSegments<DestID_,NodeID_>(numSegments, numa_aware);
label_to_segment[label] = graphSegments;
#ifdef LOADSEG
cout << "loading segmented graph from " << path << endl;
#pragma omp parallel for num_threads(numSegments)
for (int i = 0; i < numSegments; i++) {
FILE *in;
in = fopen((path + "/" + std::to_string(i)).c_str(), "r");
auto sg = graphSegments->getSegmentedGraph(i);
fread((void *) &sg->numVertices, sizeof(sg->numVertices), 1, in);
fread((void *) &sg->numEdges, sizeof(sg->numEdges), 1, in);
sg->allocate(i);
fread((void *) sg->graphId, sizeof(*sg->graphId), sg->numVertices, in);
fread((void *) sg->edgeArray, sizeof(*sg->edgeArray), sg->numEdges, in);
fread((void *) sg->vertexArray, sizeof(*sg->vertexArray), sg->numVertices + 1, in);
fclose(in);
}
return;
#endif
int segmentRange = (num_nodes() + numSegments - 1) / numSegments;
//Go through the original graph and count the number of target vertices and edges for each segment
for (auto d : vertices()){
for (auto s : in_neigh(d)){
int segment_id;
if (std::is_same<DestID_, NodeWeight<>>::value)
segment_id = static_cast<NodeWeight<>>(s).v/segmentRange;
else
segment_id = s/segmentRange;
graphSegments->getSegmentedGraph(segment_id)->countEdge(d);
}
}
//Allocate each segment
graphSegments->allocate();
//Add the edges for each segment
for (auto d : vertices()){
for (auto s : in_neigh(d)){
int segment_id;
if (std::is_same<DestID_, NodeWeight<>>::value)
segment_id = static_cast<NodeWeight<>>(s).v/segmentRange;
else
segment_id = s/segmentRange;
graphSegments->getSegmentedGraph(segment_id)->addEdge(d, s);
}
}
#ifdef STORESEG
cout << "output serialized graph segments to " << path << endl;
#pragma omp parallel for num_threads(numSegments)
for(int i = 0; i < numSegments; i++) {
FILE *out = fopen((path + "/" + std::to_string(i)).c_str(), "w");
auto sg = graphSegments->getSegmentedGraph(i);
fwrite((void *) &sg->numVertices, sizeof(sg->numVertices), 1, out);
fwrite((void *) &sg->numEdges, sizeof(sg->numEdges), 1, out);
fwrite((void *) sg->graphId, sizeof(*sg->graphId), sg->numVertices, out);
fwrite((void *) sg->edgeArray, sizeof(*sg->edgeArray), sg->numEdges, out);
fwrite((void *) sg->vertexArray, sizeof(*sg->vertexArray), sg->numVertices + 1, out);
fclose(out);
}
#endif
}
private:
// Making private so cannot be modified from outside
//useful for deduplication
int* flags_;
SGOffset * offsets_;
bool is_transpose_;
bool directed_;
int64_t num_nodes_;
int64_t num_edges_;
DestID_** out_index_;
DestID_* out_neighbors_;
DestID_** in_index_;
DestID_* in_neighbors_;
public:
std::shared_ptr<int> flags_shared_;
std::shared_ptr<SGOffset> offsets_shared_;
std::shared_ptr<DestID_*> out_index_shared_;
std::shared_ptr<DestID_> out_neighbors_shared_;
std::shared_ptr<DestID_*> in_index_shared_;
std::shared_ptr<DestID_> in_neighbors_shared_;
std::map<std::string, GraphSegments<DestID_,NodeID_>*> label_to_segment;
// Raw, non-owning access to the out-neighbor index array (one DestID_*
// per vertex); presumably owned via out_index_shared_ — confirm at the
// allocation site.
DestID_** get_out_index_(void) {
return out_index_;
}
// Raw, non-owning access to the flat out-neighbor array; presumably owned
// via out_neighbors_shared_ — confirm at the allocation site.
DestID_* get_out_neighbors_(void) {
return out_neighbors_;
}
// Raw, non-owning access to the in-neighbor index array; presumably owned
// via in_index_shared_ — confirm at the allocation site.
DestID_** get_in_index_(void) {
return in_index_;
}
// Raw, non-owning access to the flat in-neighbor array; presumably owned
// via in_neighbors_shared_ — confirm at the allocation site.
DestID_* get_in_neighbors_(void) {
return in_neighbors_;
}
// Raw, non-owning access to the deduplication flags array (see flags_).
inline int* get_flags_() {
return flags_;
}
// Installs a new flags array; flags_shared_.reset(flags) makes the
// shared_ptr take ownership of the raw pointer.
// NOTE(review): shared_ptr's default deleter calls `delete`, not
// `delete[]`; if `flags` comes from `new int[n]` this is undefined
// behavior — confirm how callers allocate it. Also note the raw flags_
// and flags_shared_ now alias the same pointer.
inline void set_flags_(int *flags) {
flags_ = flags;
flags_shared_.reset(flags);
}
// Raw, non-owning access to the offsets array; presumably owned via
// offsets_shared_ — confirm at the allocation site.
inline SGOffset * get_offsets_(void) {
return offsets_;
}
};
#endif // GRAPH_H_
|
GB_binop__iseq_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_fc64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_fc64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_fc64)
// C=scalar+B GB (_bind1st__iseq_fc64)
// C=scalar+B' GB (_bind1st_tran__iseq_fc64)
// C=A+scalar GB (_bind2nd__iseq_fc64)
// C=A'+scalar GB (_bind2nd_tran__iseq_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_iseq (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_iseq (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_FC64 || GxB_NO_ISEQ_FC64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; applies the ISEQ operator
// (cij = GB_FC64_iseq (aij, bij), per the GB_BINOP macro above) entrywise.
// All of the work is in the included template, which expands using this
// file's GB_* macros.
void GB (_Cdense_ewise3_noaccum__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B, accumulating sparse/hyper B into dense C with the ISEQ operator.
// B_ek_slicing partitions B's entries into B_ntasks tasks run on
// B_nthreads threads; the work is in the included template.
GrB_Info GB (_Cdense_accumB__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out (see GB_DISABLE above);
// the caller falls back to the generic method
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b, accumulating a single scalar b into every entry of the dense
// matrix C with the ISEQ operator. p_bwork points to the scalar, passed
// as untyped GB_void and cast to GxB_FC64_t here.
//
// Fix: the original had a second `return (GrB_SUCCESS) ;` inside the
// braced scope, making the one after it unreachable dead code. The body
// now uses the single-return shape of the sibling _Cdense_accumB kernel.
GrB_Info GB (_Cdense_accumb__iseq_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// this operator/type pairing was compiled out; caller uses the
// generic method instead
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd / eWiseUnion: C = A+B (optionally masked by M, complemented or
// structural) with the ISEQ operator. When is_eWiseUnion is true, the
// alpha/beta scalars substitute for entries missing from A or B; they are
// only read in that case. The GB_WERK_DECLARE workspaces are released by
// GB_FREE_WORKSPACE — presumably defined by the included template or a
// prior header; confirm if modifying.
GrB_Info GB (_AaddB__iseq_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
// untyped scalar inputs are cast to the operator's input type
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult method 08: C = A.*B (optionally masked) where C is
// sparse/hypersparse, using the ISEQ operator. TaskList/C_ntasks describe
// the parallel task decomposition; the work is in the included meta file.
GrB_Info GB (_AemultB_08__iseq_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full, using the ISEQ operator. Since GB_BINOP_FLIP is 0 for this
// operator (defined above), only the #else branch below is compiled, and
// flipxy needs no runtime handling here.
GrB_Info GB (_AemultB_02__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 04: C<M> = A.*B where M is sparse/hyper and both A and
// B are bitmap/full, using the ISEQ operator. M_ek_slicing partitions M
// for the parallel tasks; the work is in the included template.
GrB_Info GB (_AemultB_04__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B (optionally masked, possibly complemented) where
// the result C is held in bitmap form, using the ISEQ operator.
GrB_Info GB (_AemultB_bitmap__iseq_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = iseq (x, Bx [p]) for every entry present in B: the scalar x is
// bound as the first operand of the binary operator. Bb is B's bitmap
// (may be NULL per the GBB macro convention); bnz is the number of
// positions to scan. Entries absent from the bitmap are left untouched.
GrB_Info GB (_bind1st__iseq_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
int64_t pB ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pB = 0 ; pB < bnz ; pB++)
{
// apply the operator only where B has an entry
if (GBB (Bb, pB))
{
GxB_FC64_t bij = GBX (Bx, pB, false) ;
Cx [pB] = GB_FC64_iseq (x, bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = iseq (Ax [p], y) for every entry present in A: the scalar y is
// bound as the second operand of the binary operator. Ab is A's bitmap
// (may be NULL per the GBB macro convention); anz is the number of
// positions to scan. Entries absent from the bitmap are left untouched.
GrB_Info GB (_bind2nd__iseq_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
int64_t pA ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (pA = 0 ; pA < anz ; pA++)
{
// apply the operator only where A has an entry
if (GBB (Ab, pA))
{
GxB_FC64_t aij = GBX (Ax, pA, false) ;
Cx [pA] = GB_FC64_iseq (aij, y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_iseq (x, aij) ; \
}
// C = iseq (x, A'): transpose A while applying the operator with the
// scalar x bound as the first operand. The per-entry work is done by the
// GB_CAST_OP macro redefined just above this function, expanded inside
// GB_unop_transpose.c. The #undef/#define pairs around the body retarget
// GB_ATYPE for the transpose template and then restore it; preprocessor
// directives are processed regardless of the surrounding #if branch.
GrB_Info GB (_bind1st_tran__iseq_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_iseq (aij, y) ; \
}
// C = iseq (A', y): transpose A while applying the operator with the
// scalar y bound as the second operand. The per-entry work is done by the
// GB_CAST_OP macro redefined just above this function, expanded inside
// GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__iseq_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_sgemm_pack8to1_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack8to1_int8_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD
if (ncnn::cpu_support_arm_asimddp())
{
void im2col_sgemm_pack8to1_int8_neon_arm82dot(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_pack8to1_int8_neon_arm82dot(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
if (size >= 16)
tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#else // __ARM_FEATURE_DOTPROD
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#endif // __ARM_FEATURE_DOTPROD
#else // __aarch64__
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 8u, 8, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 8u, 8, opt.workspace_allocator);
#endif // __aarch64__
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
int nn_size = size >> 4;
int remain_size_start = 0;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 16;
signed char* tmpptr = tmp.channel(i / 16);
for (int q = 0; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
// split pack8to1 to pack4
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld2 {v0.4s, v1.4s}, [%0], #32 \n"
"ld2 {v2.4s, v3.4s}, [%0], #32 \n"
"ld2 {v4.4s, v5.4s}, [%0], #32 \n"
"ld2 {v6.4s, v7.4s}, [%0] \n"
"sub %0, %0, #96 \n"
"st1 {v0.16b}, [%1], #16 \n"
"st1 {v2.16b}, [%1], #16 \n"
"st1 {v4.16b}, [%1], #16 \n"
"st1 {v6.16b}, [%1], #16 \n"
"st1 {v1.16b}, [%1], #16 \n"
"st1 {v3.16b}, [%1], #16 \n"
"st1 {v5.16b}, [%1], #16 \n"
"st1 {v7.16b}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
img0 += size * 8;
}
}
}
remain_size_start += nn_size << 4;
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
for (int q = 0; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld2 {v0.4s, v1.4s}, [%0], #32 \n"
"ld2 {v2.4s, v3.4s}, [%0] \n"
"sub %0, %0, #32 \n"
"st1 {v0.16b}, [%1], #16 \n"
"st1 {v2.16b}, [%1], #16 \n"
"st1 {v1.16b}, [%1], #16 \n"
"st1 {v3.16b}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
img0 += size * 8;
}
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#else // __ARM_FEATURE_DOTPROD
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 2;
#endif // __ARM_FEATURE_DOTPROD
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
#if __ARM_FEATURE_DOTPROD
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4);
#else
signed char* tmpptr = tmp.channel(i / 4);
#endif
for (int q = 0; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
#if __ARM_FEATURE_DOTPROD
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld2 {v0.4s, v1.4s}, [%0] \n"
"st1 {v0.4s, v1.4s}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#else
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.16b, v1.16b}, [%0] \n"
"st1 {v0.16b, v1.16b}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#endif // __ARM_FEATURE_DOTPROD
img0 += size * 8;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2);
#else
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#endif
#else
signed char* tmpptr = tmp.channel(i / 2);
#endif
for (int q = 0; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld2 {v0.2s, v1.2s}, [%0] \n"
"st1 {v0.2s, v1.2s}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
#else
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.16b}, [%0] \n"
"st1 {v0.16b}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#endif // __ARM_FEATURE_DOTPROD
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.s8 {d0-d1}, [%0 :64] \n"
"vst1.s8 {d0-d1}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "q0");
#endif
img0 += size * 8;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#else
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#endif
#else
signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
for (int q = 0; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i * 8;
for (int k = 0; k < maxk; k++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #64] \n"
"ld1 {v0.8b}, [%0] \n"
"st1 {v0.8b}, [%1], #8 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0 :64] \n"
"vst1.s8 {d0}, [%1 :64]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "d0");
#endif
img0 += size * 8;
}
}
}
}
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p + 1);
int* outptr2 = top_blob.channel(p + 2);
int* outptr3 = top_blob.channel(p + 3);
int i = 0;
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
for (; i + 15 < size; i += 16)
{
const signed char* tmpptr = tmp.channel(i / 16);
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
asm volatile(
"ld1 {v24.16b}, [%6], #16 \n" // _w0123_l
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"ld1 {v16.16b}, [%5], #16 \n" // _val0123_l
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"0: \n"
"ld1 {v17.16b}, [%5], #16 \n" // _val4567_l
"sdot v0.4s, v24.16b, v16.4b[0] \n"
"sdot v1.4s, v24.16b, v16.4b[1] \n"
"sdot v2.4s, v24.16b, v16.4b[2] \n"
"sdot v3.4s, v24.16b, v16.4b[3] \n"
"ld1 {v18.16b}, [%5], #16 \n" // _val891011_l
"sdot v4.4s, v24.16b, v17.4b[0] \n"
"sdot v5.4s, v24.16b, v17.4b[1] \n"
"sdot v6.4s, v24.16b, v17.4b[2] \n"
"sdot v7.4s, v24.16b, v17.4b[3] \n"
"ld1 {v19.16b}, [%5], #16 \n" // _val12131415_l
"sdot v8.4s, v24.16b, v18.4b[0] \n"
"sdot v9.4s, v24.16b, v18.4b[1] \n"
"ld1 {v25.16b}, [%6], #16 \n" // _w0123_h
"sdot v10.4s, v24.16b, v18.4b[2] \n"
"sdot v11.4s, v24.16b, v18.4b[3] \n"
"ld1 {v20.16b}, [%5], #16 \n" // _val0123_h
"sdot v12.4s, v24.16b, v19.4b[0] \n"
"sdot v13.4s, v24.16b, v19.4b[1] \n"
"sdot v14.4s, v24.16b, v19.4b[2] \n"
"sdot v15.4s, v24.16b, v19.4b[3] \n"
"ld1 {v21.16b}, [%5], #16 \n" // _val4567_h
"sdot v0.4s, v25.16b, v20.4b[0] \n"
"sdot v1.4s, v25.16b, v20.4b[1] \n"
"sdot v2.4s, v25.16b, v20.4b[2] \n"
"sdot v3.4s, v25.16b, v20.4b[3] \n"
"ld1 {v22.16b}, [%5], #16 \n" // _val891011_h
"sdot v4.4s, v25.16b, v21.4b[0] \n"
"sdot v5.4s, v25.16b, v21.4b[1] \n"
"sdot v6.4s, v25.16b, v21.4b[2] \n"
"sdot v7.4s, v25.16b, v21.4b[3] \n"
"ld1 {v23.16b}, [%5], #16 \n" // _val12131415_h
"sdot v8.4s, v25.16b, v22.4b[0] \n"
"sdot v9.4s, v25.16b, v22.4b[1] \n"
"ld1 {v24.16b}, [%6], #16 \n" // _w0123_l
"sdot v10.4s, v25.16b, v22.4b[2] \n"
"sdot v11.4s, v25.16b, v22.4b[3] \n"
"ld1 {v16.16b}, [%5], #16 \n" // _val0123_l
"sdot v12.4s, v25.16b, v23.4b[0] \n"
"sdot v13.4s, v25.16b, v23.4b[1] \n"
"subs %w4, %w4, #1 \n"
"sdot v14.4s, v25.16b, v23.4b[2] \n"
"sdot v15.4s, v25.16b, v23.4b[3] \n"
"bne 0b \n"
"sub %5, %5, #16 \n"
"sub %6, %6, #16 \n"
// transpose 4x16
"trn1 v16.4s, v0.4s, v1.4s \n"
"trn2 v17.4s, v0.4s, v1.4s \n"
"trn1 v18.4s, v2.4s, v3.4s \n"
"trn2 v19.4s, v2.4s, v3.4s \n"
"trn1 v20.4s, v4.4s, v5.4s \n"
"trn2 v21.4s, v4.4s, v5.4s \n"
"trn1 v22.4s, v6.4s, v7.4s \n"
"trn2 v23.4s, v6.4s, v7.4s \n"
"trn1 v24.4s, v8.4s, v9.4s \n"
"trn2 v25.4s, v8.4s, v9.4s \n"
"trn1 v26.4s, v10.4s, v11.4s \n"
"trn2 v27.4s, v10.4s, v11.4s \n"
"trn1 v28.4s, v12.4s, v13.4s \n"
"trn2 v29.4s, v12.4s, v13.4s \n"
"trn1 v30.4s, v14.4s, v15.4s \n"
"trn2 v31.4s, v14.4s, v15.4s \n"
"trn1 v0.2d, v16.2d, v18.2d \n"
"trn2 v8.2d, v16.2d, v18.2d \n"
"trn1 v4.2d, v17.2d, v19.2d \n"
"trn2 v12.2d, v17.2d, v19.2d \n"
"trn1 v1.2d, v20.2d, v22.2d \n"
"trn2 v9.2d, v20.2d, v22.2d \n"
"trn1 v5.2d, v21.2d, v23.2d \n"
"trn2 v13.2d, v21.2d, v23.2d \n"
"trn1 v2.2d, v24.2d, v26.2d \n"
"trn2 v10.2d, v24.2d, v26.2d \n"
"trn1 v6.2d, v25.2d, v27.2d \n"
"trn2 v14.2d, v25.2d, v27.2d \n"
"trn1 v3.2d, v28.2d, v30.2d \n"
"trn2 v11.2d, v28.2d, v30.2d \n"
"trn1 v7.2d, v29.2d, v31.2d \n"
"trn2 v15.2d, v29.2d, v31.2d \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%2], #64 \n"
"st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%3], #64 \n"
: "=r"(outptr0),
"=r"(outptr1),
"=r"(outptr2),
"=r"(outptr3),
"=r"(nn),
"=r"(tmpptr),
"=r"(kptr0)
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(nn),
"5"(tmpptr),
"6"(kptr0)
: "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val4567_l = vld1q_s8(tmpptr + 16);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3);
_sum4 = vdotq_laneq_s32(_sum4, _w0123_l, _val4567_l, 0);
_sum5 = vdotq_laneq_s32(_sum5, _w0123_l, _val4567_l, 1);
_sum6 = vdotq_laneq_s32(_sum6, _w0123_l, _val4567_l, 2);
_sum7 = vdotq_laneq_s32(_sum7, _w0123_l, _val4567_l, 3);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 32);
int8x16_t _val4567_h = vld1q_s8(tmpptr + 48);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3);
_sum4 = vdotq_laneq_s32(_sum4, _w0123_h, _val4567_h, 0);
_sum5 = vdotq_laneq_s32(_sum5, _w0123_h, _val4567_h, 1);
_sum6 = vdotq_laneq_s32(_sum6, _w0123_h, _val4567_h, 2);
_sum7 = vdotq_laneq_s32(_sum7, _w0123_h, _val4567_h, 3);
tmpptr += 64;
kptr0 += 32;
}
// transpose 4x8
int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1);
int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3);
int32x4x2_t _s45 = vtrnq_s32(_sum4, _sum5);
int32x4x2_t _s67 = vtrnq_s32(_sum6, _sum7);
_sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0]));
_sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1]));
_sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0]));
_sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1]));
_sum4 = vcombine_s32(vget_low_s32(_s45.val[0]), vget_low_s32(_s67.val[0]));
_sum5 = vcombine_s32(vget_low_s32(_s45.val[1]), vget_low_s32(_s67.val[1]));
_sum6 = vcombine_s32(vget_high_s32(_s45.val[0]), vget_high_s32(_s67.val[0]));
_sum7 = vcombine_s32(vget_high_s32(_s45.val[1]), vget_high_s32(_s67.val[1]));
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr1, _sum1);
vst1q_s32(outptr2, _sum2);
vst1q_s32(outptr3, _sum3);
vst1q_s32(outptr0 + 4, _sum4);
vst1q_s32(outptr1 + 4, _sum5);
vst1q_s32(outptr2 + 4, _sum6);
vst1q_s32(outptr3 + 4, _sum7);
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
}
#endif
for (; i + 3 < size; i += 4)
{
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4);
#else
const signed char* tmpptr = tmp.channel(i / 4);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val0123_l, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val0123_l, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_l, _val0123_l, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_l, _val0123_l, 3);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 16);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val0123_h, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val0123_h, 1);
_sum2 = vdotq_laneq_s32(_sum2, _w0123_h, _val0123_h, 2);
_sum3 = vdotq_laneq_s32(_sum3, _w0123_h, _val0123_h, 3);
tmpptr += 32;
kptr0 += 32;
}
// transpose 4x4
int32x4x2_t _s01 = vtrnq_s32(_sum0, _sum1);
int32x4x2_t _s23 = vtrnq_s32(_sum2, _sum3);
_sum0 = vcombine_s32(vget_low_s32(_s01.val[0]), vget_low_s32(_s23.val[0]));
_sum1 = vcombine_s32(vget_low_s32(_s01.val[1]), vget_low_s32(_s23.val[1]));
_sum2 = vcombine_s32(vget_high_s32(_s01.val[0]), vget_high_s32(_s23.val[0]));
_sum3 = vcombine_s32(vget_high_s32(_s01.val[1]), vget_high_s32(_s23.val[1]));
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr1, _sum1);
vst1q_s32(outptr2, _sum2);
vst1q_s32(outptr3, _sum3);
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#else // __ARM_FEATURE_DOTPROD
asm volatile(
"eor v0.16b, v0.16b, v0.16b \n"
"eor v1.16b, v1.16b, v1.16b \n"
"eor v2.16b, v2.16b, v2.16b \n"
"eor v3.16b, v3.16b, v3.16b \n"
"eor v4.16b, v4.16b, v4.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"prfm pldl1keep, [%5, #128] \n"
"prfm pldl1keep, [%6, #256] \n"
"lsr w4, %w4, #1 \n" // w4 = nn >> 1
"cmp w4, #0 \n"
"beq 1f \n"
"prfm pldl1keep, [%6, #512] \n"
"add x5, %5, #16 \n"
"prfm pldl1keep, [x5, #128] \n"
"ld1 {v16.16b}, [%5] \n" // val L H
"ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%6], #64 \n"
"add %5, %5, #32 \n"
"ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L
"ld1 {v18.16b}, [%5] \n"
"add %5, %5, #32 \n"
"0: \n"
"smull v24.8h, v16.8b, v20.8b \n"
"prfm pldl1keep, [%6, #256] \n"
"smull2 v25.8h, v17.16b, v20.16b \n"
"prfm pldl1keep, [%6, #512] \n"
"smull v26.8h, v16.8b, v21.8b \n"
"subs w4, w4, #1 \n"
"smull2 v27.8h, v17.16b, v21.16b \n"
"ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L
"smlal v24.8h, v18.8b, v22.8b \n"
"smlal2 v25.8h, v19.16b, v22.16b \n"
"smlal v26.8h, v18.8b, v23.8b \n"
"smlal2 v27.8h, v19.16b, v23.16b \n"
"smull2 v29.8h, v16.16b, v20.16b \n"
"sadalp v0.4s, v24.8h \n"
"smull v28.8h, v17.8b, v20.8b \n"
"sadalp v1.4s, v25.8h \n"
"smull2 v31.8h, v16.16b, v21.16b \n"
"ld1 {v16.16b}, [x5] \n" // val L H
"smull v30.8h, v17.8b, v21.8b \n"
"add x5, x5, #32 \n"
"smlal2 v29.8h, v18.16b, v22.16b \n"
"sadalp v2.4s, v26.8h \n"
"smlal v28.8h, v19.8b, v22.8b \n"
"sadalp v3.4s, v27.8h \n"
"smlal2 v31.8h, v18.16b, v23.16b \n"
"ld1 {v18.16b}, [x5] \n"
"smlal v30.8h, v19.8b, v23.8b \n"
"ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L
"smull v24.8h, v16.8b, v20.8b \n"
"add x5, x5, #32 \n"
"smull2 v25.8h, v17.16b, v20.16b \n"
"prfm pldl1keep, [x5, #128] \n"
"smull v26.8h, v16.8b, v21.8b \n"
"prfm pldl1keep, [x5, #384] \n"
"smull2 v27.8h, v17.16b, v21.16b \n"
"ext v19.16b, v18.16b, v18.16b, #8 \n" // val H L
"smlal v24.8h, v18.8b, v22.8b \n"
"sadalp v5.4s, v29.8h \n"
"smlal2 v25.8h, v19.16b, v22.16b \n"
"sadalp v4.4s, v28.8h \n"
"smlal v26.8h, v18.8b, v23.8b \n"
"sadalp v7.4s, v31.8h \n"
"smlal2 v27.8h, v19.16b, v23.16b \n"
"sadalp v6.4s, v30.8h \n"
"smull2 v29.8h, v16.16b, v20.16b \n"
"sadalp v8.4s, v24.8h \n"
"smull v28.8h, v17.8b, v20.8b \n"
"sadalp v9.4s, v25.8h \n"
"smull2 v31.8h, v16.16b, v21.16b \n"
"ld1 {v16.16b}, [%5] \n" // val L H
"smull v30.8h, v17.8b, v21.8b \n"
"add %5, %5, #32 \n"
"smlal2 v29.8h, v18.16b, v22.16b \n"
"sadalp v10.4s, v26.8h \n"
"smlal v28.8h, v19.8b, v22.8b \n"
"sadalp v11.4s, v27.8h \n"
"smlal2 v31.8h, v18.16b, v23.16b \n"
"ld1 {v18.16b}, [%5] \n"
"smlal v30.8h, v19.8b, v23.8b \n"
"add %5, %5, #32 \n"
"ld1 {v20.16b, v21.16b, v22.16b, v23.16b}, [%6], #64 \n"
"sadalp v13.4s, v29.8h \n"
"prfm pldl1keep, [%5, #128] \n"
"sadalp v12.4s, v28.8h \n"
"prfm pldl1keep, [%5, #384] \n"
"sadalp v15.4s, v31.8h \n"
"ext v17.16b, v16.16b, v16.16b, #8 \n" // val H L
"sadalp v14.4s, v30.8h \n"
"bne 0b \n"
"sub %5, %5, #64 \n"
"sub %6, %6, #64 \n"
"1: \n"
"and w4, %w4, #1 \n" // w4 = remain = nn & 1
"cmp w4, #0 \n" // w4 > 0
"beq 2f \n"
"ld1 {v16.8b, v17.8b}, [%5], #16 \n"
"ld1 {v20.8b, v21.8b, v22.8b, v23.8b}, [%6], #32 \n"
"smull v24.8h, v16.8b, v20.8b \n"
"smull v25.8h, v16.8b, v21.8b \n"
"smull v26.8h, v16.8b, v22.8b \n"
"ld1 {v18.8b, v19.8b}, [%5], #16 \n"
"smull v27.8h, v16.8b, v23.8b \n"
"sadalp v0.4s, v24.8h \n"
"smull v28.8h, v17.8b, v20.8b \n"
"sadalp v1.4s, v25.8h \n"
"smull v29.8h, v17.8b, v21.8b \n"
"sadalp v2.4s, v26.8h \n"
"smull v30.8h, v17.8b, v22.8b \n"
"sadalp v3.4s, v27.8h \n"
"smull v31.8h, v17.8b, v23.8b \n"
"sadalp v4.4s, v28.8h \n"
"smull v24.8h, v18.8b, v20.8b \n"
"sadalp v5.4s, v29.8h \n"
"smull v25.8h, v18.8b, v21.8b \n"
"sadalp v6.4s, v30.8h \n"
"smull v26.8h, v18.8b, v22.8b \n"
"sadalp v7.4s, v31.8h \n"
"smull v27.8h, v18.8b, v23.8b \n"
"sadalp v8.4s, v24.8h \n"
"smull v28.8h, v19.8b, v20.8b \n"
"sadalp v9.4s, v25.8h \n"
"smull v29.8h, v19.8b, v21.8b \n"
"sadalp v10.4s, v26.8h \n"
"smull v30.8h, v19.8b, v22.8b \n"
"sadalp v11.4s, v27.8h \n"
"smull v31.8h, v19.8b, v23.8b \n"
"sadalp v12.4s, v28.8h \n"
"sadalp v13.4s, v29.8h \n"
"sadalp v14.4s, v30.8h \n"
"sadalp v15.4s, v31.8h \n"
"2: \n"
"addp v0.4s, v0.4s, v4.4s \n"
"addp v1.4s, v1.4s, v5.4s \n"
"addp v2.4s, v2.4s, v6.4s \n"
"addp v3.4s, v3.4s, v7.4s \n"
"addp v8.4s, v8.4s, v12.4s \n"
"addp v9.4s, v9.4s, v13.4s \n"
"addp v10.4s, v10.4s, v14.4s \n"
"addp v11.4s, v11.4s, v15.4s \n"
"addp v0.4s, v0.4s, v8.4s \n"
"addp v1.4s, v1.4s, v9.4s \n"
"addp v2.4s, v2.4s, v10.4s \n"
"addp v3.4s, v3.4s, v11.4s \n"
"st1 {v0.4s}, [%0], #16 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%2], #16 \n"
"st1 {v3.4s}, [%3], #16 \n"
: "=r"(outptr0),
"=r"(outptr1),
"=r"(outptr2),
"=r"(outptr3),
"=r"(nn),
"=r"(tmpptr),
"=r"(kptr0)
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(nn),
"5"(tmpptr),
"6"(kptr0)
: "memory", "x4", "x5", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
#endif // __ARM_FEATURE_DOTPROD
}
#endif // __aarch64__
for (; i + 1 < size; i += 2)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x16_t _val01_l_h = vld1q_s8(tmpptr);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_l, _val01_l_h, 0);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_l, _val01_l_h, 1);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_laneq_s32(_sum0, _w0123_h, _val01_l_h, 2);
_sum1 = vdotq_laneq_s32(_sum1, _w0123_h, _val01_l_h, 3);
tmpptr += 16;
kptr0 += 32;
}
vst1q_lane_s32(outptr0, _sum0, 0);
vst1q_lane_s32(outptr1, _sum0, 1);
vst1q_lane_s32(outptr2, _sum0, 2);
vst1q_lane_s32(outptr3, _sum0, 3);
vst1q_lane_s32(outptr0 + 1, _sum1, 0);
vst1q_lane_s32(outptr1 + 1, _sum1, 1);
vst1q_lane_s32(outptr2 + 1, _sum1, 2);
vst1q_lane_s32(outptr3 + 1, _sum1, 3);
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
outptr3 += 2;
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum00 = vdupq_n_s32(0);
int32x4_t _sum01 = vdupq_n_s32(0);
int32x4_t _sum02 = vdupq_n_s32(0);
int32x4_t _sum03 = vdupq_n_s32(0);
int32x4_t _sum10 = vdupq_n_s32(0);
int32x4_t _sum11 = vdupq_n_s32(0);
int32x4_t _sum12 = vdupq_n_s32(0);
int32x4_t _sum13 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv00 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w01));
int16x8_t _wv01 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w01));
int16x8_t _wv02 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w23));
int16x8_t _wv03 = vmull_s8(vget_low_s8(_val0), vget_high_s8(_w23));
int16x8_t _wv10 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w01));
int16x8_t _wv11 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w01));
int16x8_t _wv12 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w23));
int16x8_t _wv13 = vmull_s8(vget_high_s8(_val0), vget_high_s8(_w23));
int8x16_t _w45 = vld1q_s8(kptr0 + 32);
int8x16_t _w67 = vld1q_s8(kptr0 + 48);
_wv00 = vmlal_s8(_wv00, vget_low_s8(_val1), vget_low_s8(_w45));
_wv01 = vmlal_s8(_wv01, vget_low_s8(_val1), vget_high_s8(_w45));
_wv02 = vmlal_s8(_wv02, vget_low_s8(_val1), vget_low_s8(_w67));
_wv03 = vmlal_s8(_wv03, vget_low_s8(_val1), vget_high_s8(_w67));
_wv10 = vmlal_s8(_wv10, vget_high_s8(_val1), vget_low_s8(_w45));
_wv11 = vmlal_s8(_wv11, vget_high_s8(_val1), vget_high_s8(_w45));
_wv12 = vmlal_s8(_wv12, vget_high_s8(_val1), vget_low_s8(_w67));
_wv13 = vmlal_s8(_wv13, vget_high_s8(_val1), vget_high_s8(_w67));
_sum00 = vpadalq_s16(_sum00, _wv00);
_sum01 = vpadalq_s16(_sum01, _wv01);
_sum02 = vpadalq_s16(_sum02, _wv02);
_sum03 = vpadalq_s16(_sum03, _wv03);
_sum10 = vpadalq_s16(_sum10, _wv10);
_sum11 = vpadalq_s16(_sum11, _wv11);
_sum12 = vpadalq_s16(_sum12, _wv12);
_sum13 = vpadalq_s16(_sum13, _wv13);
tmpptr += 32;
kptr0 += 64;
}
for (; j < nn; j++)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv00 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01));
int16x8_t _wv01 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01));
int16x8_t _wv02 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23));
int16x8_t _wv03 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23));
int16x8_t _wv10 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w01));
int16x8_t _wv11 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w01));
int16x8_t _wv12 = vmull_s8(vget_high_s8(_val), vget_low_s8(_w23));
int16x8_t _wv13 = vmull_s8(vget_high_s8(_val), vget_high_s8(_w23));
_sum00 = vpadalq_s16(_sum00, _wv00);
_sum01 = vpadalq_s16(_sum01, _wv01);
_sum02 = vpadalq_s16(_sum02, _wv02);
_sum03 = vpadalq_s16(_sum03, _wv03);
_sum10 = vpadalq_s16(_sum10, _wv10);
_sum11 = vpadalq_s16(_sum11, _wv11);
_sum12 = vpadalq_s16(_sum12, _wv12);
_sum13 = vpadalq_s16(_sum13, _wv13);
tmpptr += 16;
kptr0 += 32;
}
int32x4_t _s001 = vpaddq_s32(_sum00, _sum01);
int32x4_t _s023 = vpaddq_s32(_sum02, _sum03);
int32x4_t _s101 = vpaddq_s32(_sum10, _sum11);
int32x4_t _s123 = vpaddq_s32(_sum12, _sum13);
int32x4_t _sum0 = vpaddq_s32(_s001, _s023);
int32x4_t _sum1 = vpaddq_s32(_s101, _s123);
vst1q_lane_s32(outptr0, _sum0, 0);
vst1q_lane_s32(outptr1, _sum0, 1);
vst1q_lane_s32(outptr2, _sum0, 2);
vst1q_lane_s32(outptr3, _sum0, 3);
vst1q_lane_s32(outptr0 + 1, _sum1, 0);
vst1q_lane_s32(outptr1 + 1, _sum1, 1);
vst1q_lane_s32(outptr2 + 1, _sum1, 2);
vst1q_lane_s32(outptr3 + 1, _sum1, 3);
outptr0 += 2;
outptr1 += 2;
outptr2 += 2;
outptr3 += 2;
#endif // __ARM_FEATURE_DOTPROD
#else // __aarch64__
asm volatile(
"veor q0, q0 \n"
"veor q1, q1 \n"
"veor q2, q2 \n"
"veor q3, q3 \n"
"veor q4, q4 \n"
"veor q5, q5 \n"
"veor q6, q6 \n"
"veor q7, q7 \n"
"pld [%5, #256] \n"
"lsr r4, %4, #1 \n" // r4 = nn = size >> 1
"cmp r4, #0 \n"
"beq 1f \n"
"add r5, %6, #16 \n"
"pld [%6, #128] \n"
"mov r6, #32 \n"
"pld [%6, #384] \n"
"vld1.s8 {d20-d21}, [%6 :128], r6 \n" // _w01
"vld1.s8 {d16-d19}, [%5 :128]! \n" // _val0 _val1
"vld1.s8 {d22-d23}, [%6 :128], r6 \n" // _w45
"0: \n"
"vmull.s8 q12, d16, d20 \n"
"pld [%5, #256] \n"
"vmull.s8 q13, d16, d21 \n"
"pld [%6, #384] \n"
"vmull.s8 q14, d17, d20 \n"
"vmull.s8 q15, d17, d21 \n"
"vld1.s8 {d20-d21}, [r5 :128], r6 \n" // _w23
"vmlal.s8 q12, d18, d22 \n"
"vmlal.s8 q13, d18, d23 \n"
"subs r4, r4, #1 \n"
"vmlal.s8 q14, d19, d22 \n"
"vmlal.s8 q15, d19, d23 \n"
"vld1.s8 {d22-d23}, [r5 :128], r6 \n" // _w67
"vpadal.s16 q0, q12 \n"
"vmull.s8 q12, d16, d20 \n"
"vpadal.s16 q1, q13 \n"
"vmull.s8 q13, d16, d21 \n"
"vpadal.s16 q4, q14 \n"
"vmull.s8 q14, d17, d20 \n"
"vpadal.s16 q5, q15 \n"
"vmull.s8 q15, d17, d21 \n"
"vld1.s8 {d16-d17}, [%5 :128]! \n" // _val0
"vmlal.s8 q12, d18, d22 \n"
"vld1.s8 {d20-d21}, [%6 :128], r6 \n" // _w01
"vmlal.s8 q13, d18, d23 \n"
"pld [r5, #128] \n"
"vmlal.s8 q14, d19, d22 \n"
"pld [r5, #384] \n"
"vmlal.s8 q15, d19, d23 \n"
"vld1.s8 {d18-d19}, [%5 :128]! \n" // _val1
"vpadal.s16 q2, q12 \n"
"vld1.s8 {d22-d23}, [%6 :128], r6 \n" // _w45
"vpadal.s16 q3, q13 \n"
"pld [%5, #128] \n"
"vpadal.s16 q6, q14 \n"
"pld [%6, #128] \n"
"vpadal.s16 q7, q15 \n"
"bne 0b \n"
"sub %5, %5, #32 \n"
"sub %6, %6, #64 \n"
"1: \n"
"and r4, %4, #1 \n" // r4 = remain = size & 1
"cmp r4, #0 \n" // r4 > 0
"beq 2f \n"
"vld1.s8 {d16-d17}, [%5 :128]! \n" // _val
"vld1.s8 {d20-d21}, [%6 :128]! \n" // _w01
"vmull.s8 q12, d16, d20 \n"
"vld1.s8 {d22-d23}, [%6 :128]! \n" // _w23
"vmull.s8 q13, d16, d21 \n"
"vmull.s8 q14, d17, d20 \n"
"vmull.s8 q15, d17, d21 \n"
"vpadal.s16 q0, q12 \n"
"vmull.s8 q12, d16, d22 \n"
"vpadal.s16 q1, q13 \n"
"vmull.s8 q13, d16, d23 \n"
"vpadal.s16 q4, q14 \n"
"vmull.s8 q14, d17, d22 \n"
"vpadal.s16 q5, q15 \n"
"vmull.s8 q15, d17, d23 \n"
"vpadal.s16 q2, q12 \n"
"vpadal.s16 q3, q13 \n"
"vpadal.s16 q6, q14 \n"
"vpadal.s16 q7, q15 \n"
"2: \n"
"vpadd.s32 d16, d0, d1 \n"
"vpadd.s32 d17, d2, d3 \n"
"vpadd.s32 d18, d4, d5 \n"
"vpadd.s32 d19, d6, d7 \n"
"vpadd.s32 d20, d8, d9 \n"
"vpadd.s32 d21, d10, d11 \n"
"vpadd.s32 d22, d12, d13 \n"
"vpadd.s32 d23, d14, d15 \n"
"vpadd.s32 d0, d16, d17 \n"
"vpadd.s32 d1, d18, d19 \n"
"vpadd.s32 d2, d20, d21 \n"
"vpadd.s32 d3, d22, d23 \n"
"vst1.s32 {d0[0]}, [%0]! \n"
"vst1.s32 {d0[1]}, [%1]! \n"
"vst1.s32 {d1[0]}, [%2]! \n"
"vst1.s32 {d1[1]}, [%3]! \n"
"vst1.s32 {d2[0]}, [%0]! \n"
"vst1.s32 {d2[1]}, [%1]! \n"
"vst1.s32 {d3[0]}, [%2]! \n"
"vst1.s32 {d3[1]}, [%3]! \n"
: "=r"(outptr0),
"=r"(outptr1),
"=r"(outptr2),
"=r"(outptr3),
"=r"(nn),
"=r"(tmpptr),
"=r"(kptr0)
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(nn),
"5"(tmpptr),
"6"(kptr0)
: "memory", "r4", "r5", "r6", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i < size; i++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4);
int nn = inch * maxk; // inch always > 0
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int8x8_t _val0_l_h = vld1_s8(tmpptr);
int8x16_t _w0123_l = vld1q_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _w0123_l, _val0_l_h, 0);
int8x16_t _w0123_h = vld1q_s8(kptr0 + 16);
_sum0 = vdotq_lane_s32(_sum0, _w0123_h, _val0_l_h, 1);
tmpptr += 8;
kptr0 += 32;
}
vst1q_lane_s32(outptr0, _sum0, 0);
vst1q_lane_s32(outptr1, _sum0, 1);
vst1q_lane_s32(outptr2, _sum0, 2);
vst1q_lane_s32(outptr3, _sum0, 3);
outptr0 += 1;
outptr1 += 1;
outptr2 += 1;
outptr3 += 1;
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv0 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w01));
int16x8_t _wv1 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w01));
int16x8_t _wv2 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w23));
int16x8_t _wv3 = vmull_s8(vget_low_s8(_val), vget_high_s8(_w23));
int8x16_t _w45 = vld1q_s8(kptr0 + 32);
int8x16_t _w67 = vld1q_s8(kptr0 + 48);
_wv0 = vmlal_s8(_wv0, vget_high_s8(_val), vget_low_s8(_w45));
_wv1 = vmlal_s8(_wv1, vget_high_s8(_val), vget_high_s8(_w45));
_wv2 = vmlal_s8(_wv2, vget_high_s8(_val), vget_low_s8(_w67));
_wv3 = vmlal_s8(_wv3, vget_high_s8(_val), vget_high_s8(_w67));
_sum0 = vpadalq_s16(_sum0, _wv0);
_sum1 = vpadalq_s16(_sum1, _wv1);
_sum2 = vpadalq_s16(_sum2, _wv2);
_sum3 = vpadalq_s16(_sum3, _wv3);
tmpptr += 16;
kptr0 += 64;
}
for (; j < nn; j++)
{
int8x8_t _val = vld1_s8(tmpptr);
int8x16_t _w01 = vld1q_s8(kptr0);
int8x16_t _w23 = vld1q_s8(kptr0 + 16);
int16x8_t _wv0 = vmull_s8(_val, vget_low_s8(_w01));
int16x8_t _wv1 = vmull_s8(_val, vget_high_s8(_w01));
int16x8_t _wv2 = vmull_s8(_val, vget_low_s8(_w23));
int16x8_t _wv3 = vmull_s8(_val, vget_high_s8(_w23));
_sum0 = vpadalq_s16(_sum0, _wv0);
_sum1 = vpadalq_s16(_sum1, _wv1);
_sum2 = vpadalq_s16(_sum2, _wv2);
_sum3 = vpadalq_s16(_sum3, _wv3);
tmpptr += 8;
kptr0 += 32;
}
#if __aarch64__
int32x4_t _s01 = vpaddq_s32(_sum0, _sum1);
int32x4_t _s23 = vpaddq_s32(_sum2, _sum3);
_sum0 = vpaddq_s32(_s01, _s23);
#else
int32x2_t _s01_low = vpadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0));
int32x2_t _s01_high = vpadd_s32(vget_low_s32(_sum1), vget_high_s32(_sum1));
int32x2_t _s23_low = vpadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2));
int32x2_t _s23_high = vpadd_s32(vget_low_s32(_sum3), vget_high_s32(_sum3));
_sum0 = vcombine_s32(vpadd_s32(_s01_low, _s01_high), vpadd_s32(_s23_low, _s23_high));
#endif
vst1q_lane_s32(outptr0, _sum0, 0);
vst1q_lane_s32(outptr1, _sum0, 1);
vst1q_lane_s32(outptr2, _sum0, 2);
vst1q_lane_s32(outptr3, _sum0, 3);
outptr0 += 1;
outptr1 += 1;
outptr2 += 1;
outptr3 += 1;
#endif // __ARM_FEATURE_DOTPROD
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
for (; i + 15 < size; i += 16)
{
const signed char* tmpptr = tmp.channel(i / 16);
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int j = 0;
for (; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val4567_l = vld1q_s8(tmpptr + 16);
int8x16_t _val89ab_l = vld1q_s8(tmpptr + 32);
int8x16_t _valcdef_l = vld1q_s8(tmpptr + 48);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 64);
int8x16_t _val4567_h = vld1q_s8(tmpptr + 80);
int8x16_t _val89ab_h = vld1q_s8(tmpptr + 96);
int8x16_t _valcdef_h = vld1q_s8(tmpptr + 112);
int8x8_t _w_lh = vld1_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0);
_sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0);
_sum2 = vdotq_lane_s32(_sum2, _val89ab_l, _w_lh, 0);
_sum3 = vdotq_lane_s32(_sum3, _valcdef_l, _w_lh, 0);
_sum0 = vdotq_lane_s32(_sum0, _val0123_h, _w_lh, 1);
_sum1 = vdotq_lane_s32(_sum1, _val4567_h, _w_lh, 1);
_sum2 = vdotq_lane_s32(_sum2, _val89ab_h, _w_lh, 1);
_sum3 = vdotq_lane_s32(_sum3, _valcdef_h, _w_lh, 1);
tmpptr += 128;
kptr0 += 8;
}
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr0 + 4, _sum1);
vst1q_s32(outptr0 + 8, _sum2);
vst1q_s32(outptr0 + 12, _sum3);
outptr0 += 16;
}
for (; i + 7 < size; i += 8)
{
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8);
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int j = 0;
for (; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val4567_l = vld1q_s8(tmpptr + 16);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 32);
int8x16_t _val4567_h = vld1q_s8(tmpptr + 48);
int8x8_t _w_lh = vld1_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0);
_sum1 = vdotq_lane_s32(_sum1, _val4567_l, _w_lh, 0);
_sum2 = vdotq_lane_s32(_sum2, _val0123_h, _w_lh, 1);
_sum3 = vdotq_lane_s32(_sum3, _val4567_h, _w_lh, 1);
tmpptr += 64;
kptr0 += 8;
}
_sum0 = vaddq_s32(_sum0, _sum2);
_sum1 = vaddq_s32(_sum1, _sum3);
vst1q_s32(outptr0, _sum0);
vst1q_s32(outptr0 + 4, _sum1);
outptr0 += 8;
}
#endif // __ARM_FEATURE_DOTPROD
for (; i + 3 < size; i += 4)
{
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4);
#else
const signed char* tmpptr = tmp.channel(i / 4);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int j = 0;
for (; j < nn; j++)
{
int8x16_t _val0123_l = vld1q_s8(tmpptr);
int8x16_t _val0123_h = vld1q_s8(tmpptr + 16);
int8x8_t _w_lh = vld1_s8(kptr0);
_sum0 = vdotq_lane_s32(_sum0, _val0123_l, _w_lh, 0);
_sum1 = vdotq_lane_s32(_sum1, _val0123_h, _w_lh, 1);
tmpptr += 32;
kptr0 += 8;
}
_sum0 = vaddq_s32(_sum0, _sum1);
vst1q_s32(outptr0, _sum0);
outptr0 += 4;
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x16_t _val2 = vld1q_s8(tmpptr + 32);
int8x16_t _val3 = vld1q_s8(tmpptr + 48);
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w));
int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w));
int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), vget_low_s8(_w));
int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), vget_low_s8(_w));
_s0 = vmlal_s8(_s0, vget_low_s8(_val2), vget_high_s8(_w));
_s1 = vmlal_s8(_s1, vget_high_s8(_val2), vget_high_s8(_w));
_s2 = vmlal_s8(_s2, vget_low_s8(_val3), vget_high_s8(_w));
_s3 = vmlal_s8(_s3, vget_high_s8(_val3), vget_high_s8(_w));
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
_sum4 = vaddw_s16(_sum4, vget_low_s16(_s2));
_sum5 = vaddw_s16(_sum5, vget_high_s16(_s2));
_sum6 = vaddw_s16(_sum6, vget_low_s16(_s3));
_sum7 = vaddw_s16(_sum7, vget_high_s16(_s3));
tmpptr += 64;
kptr0 += 16;
}
for (; j < nn; j++)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x8_t _w = vld1_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), _w);
int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), _w);
int16x8_t _s2 = vmull_s8(vget_low_s8(_val1), _w);
int16x8_t _s3 = vmull_s8(vget_high_s8(_val1), _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
_sum4 = vaddw_s16(_sum4, vget_low_s16(_s2));
_sum5 = vaddw_s16(_sum5, vget_high_s16(_s2));
_sum6 = vaddw_s16(_sum6, vget_low_s16(_s3));
_sum7 = vaddw_s16(_sum7, vget_high_s16(_s3));
tmpptr += 32;
kptr0 += 8;
}
_sum0 = vaddq_s32(_sum0, _sum1);
_sum2 = vaddq_s32(_sum2, _sum3);
_sum4 = vaddq_s32(_sum4, _sum5);
_sum6 = vaddq_s32(_sum6, _sum7);
int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0));
int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2));
int32x2_t _s4 = vadd_s32(vget_low_s32(_sum4), vget_high_s32(_sum4));
int32x2_t _s6 = vadd_s32(vget_low_s32(_sum6), vget_high_s32(_sum6));
int32x2_t _ss0 = vpadd_s32(_s0, _s2);
int32x2_t _ss1 = vpadd_s32(_s4, _s6);
int32x4_t _ss = vcombine_s32(_ss0, _ss1);
vst1q_s32(outptr0, _ss);
outptr0 += 4;
#endif // __ARM_FEATURE_DOTPROD
}
#endif // __aarch64__
for (; i + 1 < size; i += 2)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
#if __ARM_FEATURE_DOTPROD
int32x2_t _sum0 = vdup_n_s32(0);
int32x2_t _sum1 = vdup_n_s32(0);
int j = 0;
for (; j < nn; j++)
{
int8x16_t _val01_lh = vld1q_s8(tmpptr);
int8x8_t _w_lh = vld1_s8(kptr0);
_sum0 = vdot_lane_s32(_sum0, vget_low_s8(_val01_lh), _w_lh, 0);
_sum1 = vdot_lane_s32(_sum1, vget_high_s8(_val01_lh), _w_lh, 1);
tmpptr += 16;
kptr0 += 8;
}
int32x2_t _sum = vadd_s32(_sum0, _sum1);
vst1_s32(outptr0, _sum);
outptr0 += 2;
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val0 = vld1q_s8(tmpptr);
int8x16_t _val1 = vld1q_s8(tmpptr + 16);
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val0), vget_low_s8(_w));
int16x8_t _s1 = vmull_s8(vget_high_s8(_val0), vget_low_s8(_w));
_s0 = vmlal_s8(_s0, vget_low_s8(_val1), vget_high_s8(_w));
_s1 = vmlal_s8(_s1, vget_high_s8(_val1), vget_high_s8(_w));
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
tmpptr += 32;
kptr0 += 16;
}
for (; j < nn; j++)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x8_t _w = vld1_s8(kptr0);
int16x8_t _s0 = vmull_s8(vget_low_s8(_val), _w);
int16x8_t _s1 = vmull_s8(vget_high_s8(_val), _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s0));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s0));
_sum2 = vaddw_s16(_sum2, vget_low_s16(_s1));
_sum3 = vaddw_s16(_sum3, vget_high_s16(_s1));
tmpptr += 16;
kptr0 += 8;
}
_sum0 = vaddq_s32(_sum0, _sum1);
_sum2 = vaddq_s32(_sum2, _sum3);
int32x2_t _s0 = vadd_s32(vget_low_s32(_sum0), vget_high_s32(_sum0));
int32x2_t _s2 = vadd_s32(vget_low_s32(_sum2), vget_high_s32(_sum2));
int32x2_t _ss = vpadd_s32(_s0, _s2);
vst1_s32(outptr0, _ss);
outptr0 += 2;
#endif // __ARM_FEATURE_DOTPROD
}
for (; i < size; i++)
{
#if __aarch64__
#if __ARM_FEATURE_DOTPROD
const signed char* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#endif
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p / 4 + p % 4);
int nn = inch * maxk; // inch always > 0
#if __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x2_t _sum1 = vdup_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w = vld1q_s8(kptr0);
_sum0 = vdotq_s32(_sum0, _val, _w);
tmpptr += 16;
kptr0 += 16;
}
for (; j < nn; j++)
{
int8x8_t _val = vld1_s8(tmpptr);
int8x8_t _w = vld1_s8(kptr0);
_sum1 = vdot_s32(_sum1, _val, _w);
tmpptr += 8;
kptr0 += 8;
}
int sum = vaddvq_s32(_sum0) + vaddv_s32(_sum1);
outptr0[0] = sum;
outptr0 += 1;
#else // __ARM_FEATURE_DOTPROD
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int j = 0;
for (; j + 1 < nn; j += 2)
{
int8x16_t _val = vld1q_s8(tmpptr);
int8x16_t _w = vld1q_s8(kptr0);
int16x8_t _s8 = vmull_s8(vget_low_s8(_val), vget_low_s8(_w));
_s8 = vmlal_s8(_s8, vget_high_s8(_val), vget_high_s8(_w));
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s8));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s8));
tmpptr += 16;
kptr0 += 16;
}
for (; j < nn; j++)
{
int8x8_t _val = vld1_s8(tmpptr);
int8x8_t _w = vld1_s8(kptr0);
int16x8_t _s8 = vmull_s8(_val, _w);
_sum0 = vaddw_s16(_sum0, vget_low_s16(_s8));
_sum1 = vaddw_s16(_sum1, vget_high_s16(_s8));
tmpptr += 8;
kptr0 += 8;
}
int32x4_t _sum = vaddq_s32(_sum0, _sum1);
#if __aarch64__
int sum = vaddvq_s32(_sum); // dot
#else
int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum));
_ss = vpadd_s32(_ss, _ss);
int sum = vget_lane_s32(_ss, 0);
#endif
outptr0[0] = sum;
outptr0 += 1;
#endif // __ARM_FEATURE_DOTPROD
}
}
}
// Repack the int8 convolution kernel into the interleaved layout consumed by
// the pack8to1 im2col+sgemm kernels above.  The relative order of the g00
// writes below *is* the layout contract with those kernels — do not reorder.
static void convolution_im2col_sgemm_transform_kernel_pack8to1_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
#if NCNN_ARM82DOT && __ARM_NEON && __aarch64__ && !__ARM_FEATURE_DOTPROD
    // Runtime dispatch: this TU was built without dot-product support, but the
    // CPU has it — delegate to the separately compiled arm82dot variant so the
    // packed layout matches the dot-product compute path.
    if (ncnn::cpu_support_arm_asimddp())
    {
        extern void convolution_im2col_sgemm_transform_kernel_pack8to1_int8_neon_arm82dot(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h);
        convolution_im2col_sgemm_transform_kernel_pack8to1_int8_neon_arm82dot(_kernel, kernel_tm, inch, outch, kernel_w, kernel_h);
        return;
    }
#endif
    const int maxk = kernel_w * kernel_h;

    // interleave
    // src = maxk-inch-outch
    // dst = 8a-4b-maxk-inch/8a-outch/4b
    // dst = 4a-4b-2-maxk-inch/8a-outch/4b (arm82)
    Mat kernel = _kernel.reshape(maxk, inch, outch);
    if (outch >= 4)
        // groups of 4 output channels: 4 outch x 8 inch = 32 bytes per tap;
        // leftover channels (outch % 4) each get their own destination channel
        kernel_tm.create(32 * maxk, inch / 8, outch / 4 + outch % 4, (size_t)1u);
    else
        kernel_tm.create(8 * maxk, inch / 8, outch, (size_t)1u);

    int q = 0;
    // main case: pack output channels 4 at a time
    for (; q + 3 < outch; q += 4)
    {
        signed char* g00 = kernel_tm.channel(q / 4);

        // input channels are consumed 8 at a time (pack8 input)
        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
#if __ARM_FEATURE_DOTPROD
                // dot-product layout: for each of the 4 output channels, the
                // low 4 input channels first ...
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 4; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
                // ... then the high 4 input channels, matching the two
                // vdotq_laneq_s32 accumulation halves in the compute kernel
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 4; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
#else
                // plain NEON layout: all 8 input channels contiguous per
                // output channel (one int8x8 weight vector per vmull_s8)
                for (int i = 0; i < 4; i++)
                {
                    for (int j = 0; j < 8; j++)
                    {
                        const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
                        g00[0] = k00[k];
                        g00++;
                    }
                }
#endif
            }
        }
    }
    // TODO unroll 2
    // leftover output channels, one at a time; destination channel index
    // mirrors the kernel.channel(p / 4 + p % 4) lookup in the compute path
    for (; q < outch; q++)
    {
        signed char* g00 = kernel_tm.channel(q / 4 + q % 4);

        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int j = 0; j < 8; j++)
                {
                    const signed char* k00 = kernel.channel(q).row<const signed char>(p + j);
                    g00[0] = k00[k];
                    g00++;
                }
            }
        }
    }
}
// int8 convolution as im2col + sgemm, with 8-channel-packed (pack8) input and
// unpacked (pack1) int32 output.
// Step 1: im2col — unfold the input into a (size x maxk x inch) matrix of
//         int8x8 elements, one row block per (kernel tap, input channel).
// Step 2: the GEMM itself is delegated to im2col_sgemm_pack8to1_int8_neon.
// Note: top_blob dimensions are read, not computed — the caller must have
// sized top_blob already.
static void convolution_im2col_sgemm_pack8to1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    const int size = outw * outh;

    const int maxk = kernel_w * kernel_h;

    // im2col
    // elemsize 8u / elempack 8: each matrix element is 8 int8 channel lanes
    Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
    {
        // byte distance from the end of one output row's reads to the start
        // of the next (x8 for the 8 packed int8 lanes per position)
        const int gap = (w * stride_h - outw * stride_w) * 8;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            signed char* ptr = bottom_im2col.channel(p);

            for (int u = 0; u < kernel_h; u++)
            {
                for (int v = 0; v < kernel_w; v++)
                {
                    // source start for kernel tap (u, v), dilation applied
                    const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v * 8;

                    for (int i = 0; i < outh; i++)
                    {
                        int j = 0;
                        // copy 4 output positions (4 x int8x8) per iteration
                        for (; j + 3 < outw; j += 4)
                        {
                            int8x8_t _val0 = vld1_s8(sptr);
                            int8x8_t _val1 = vld1_s8(sptr + stride_w * 8);
                            int8x8_t _val2 = vld1_s8(sptr + stride_w * 16);
                            int8x8_t _val3 = vld1_s8(sptr + stride_w * 24);
                            vst1_s8(ptr, _val0);
                            vst1_s8(ptr + 8, _val1);
                            vst1_s8(ptr + 16, _val2);
                            vst1_s8(ptr + 24, _val3);

                            sptr += stride_w * 32;
                            ptr += 32;
                        }
                        // 2 positions at a time
                        for (; j + 1 < outw; j += 2)
                        {
                            int8x8_t _val0 = vld1_s8(sptr);
                            int8x8_t _val1 = vld1_s8(sptr + stride_w * 8);
                            vst1_s8(ptr, _val0);
                            vst1_s8(ptr + 8, _val1);

                            sptr += stride_w * 16;
                            ptr += 16;
                        }
                        // remaining single positions
                        for (; j < outw; j++)
                        {
                            int8x8_t _val = vld1_s8(sptr);
                            vst1_s8(ptr, _val);

                            sptr += stride_w * 8;
                            ptr += 8;
                        }

                        // jump to the next input row for this tap
                        sptr += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack8to1_int8_neon(bottom_im2col, top_blob, kernel, opt);
}
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
 * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 *
 * Fix over the classic GNU-manual example this was adapted from: the
 * original used *y as scratch space while normalizing the microsecond
 * carry, silently clobbering the caller's value.  This version works on
 * local copies, so neither X nor Y is modified.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Raw component-wise difference; usec may fall outside [0, 1000000). */
    long sec = (long)(x->tv_sec - y->tv_sec);
    long usec = (long)(x->tv_usec - y->tv_usec);

    /* Borrow from seconds until 0 <= usec. */
    if (usec < 0)
    {
        long nsec = (-usec) / 1000000 + 1;
        usec += 1000000 * nsec;
        sec -= nsec;
    }
    /* Carry into seconds while usec >= one second (handles denormalized
     * inputs; for valid inputs this branch cannot trigger). */
    if (usec >= 1000000)
    {
        long nsec = usec / 1000000;
        usec -= 1000000 * nsec;
        sec += nsec;
    }

    /* tv_usec is now certainly in [0, 1000000); a negative total
     * difference is carried entirely by tv_sec. */
    result->tv_sec = sec;
    result->tv_usec = usec;

    /* Return 1 if the difference is negative. */
    return sec < 0;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 16;
tile_size[1] = 16;
tile_size[2] = 8;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
for (t1=-1;t1<=floord(Nt-1,2);t1++) {
lbp=max(ceild(t1,2),ceild(4*t1-Nt+2,4));
ubp=min(floord(4*Nt+Nz-9,16),floord(8*t1+Nz+2,16));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(max(0,ceild(16*t2-Nz+5,8)),t1),2*t1-2*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(8*t1+Ny+7,8)),floord(16*t2+Ny+3,8)),floord(16*t1-16*t2+Nz+Ny+5,8));t3++) {
for (t4=max(max(max(0,ceild(t1-31,32)),ceild(16*t2-Nz-243,256)),ceild(8*t3-Ny-243,256));t4<=min(min(min(min(floord(4*Nt+Nx-9,256),floord(8*t1+Nx+7,256)),floord(16*t2+Nx+3,256)),floord(8*t3+Nx-5,256)),floord(16*t1-16*t2+Nz+Nx+5,256));t4++) {
for (t5=max(max(max(max(max(0,ceild(16*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(256*t4-Nx+5,4)),2*t1),4*t1-4*t2+1);t5<=min(min(min(min(min(floord(16*t1-16*t2+Nz+10,4),2*t3),Nt-1),2*t1+3),4*t2+2),64*t4+62);t5++) {
for (t6=max(max(16*t2,4*t5+4),-16*t1+16*t2+8*t5-15);t6<=min(min(16*t2+15,-16*t1+16*t2+8*t5),4*t5+Nz-5);t6++) {
for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
lbv=max(256*t4,4*t5+4);
ubv=min(256*t4+255,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) 
+ (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
game_of_life.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define N 2048
#define itera_max 2000
#define cores 1
int grid [N][N];
int new_grid[N][N];
void inicia_grids_zero(){
int i, j;
//iniciando com zero
#pragma omp parallel for collapse(2)
for (i = 0; i < N; i++){
for (j = 0; j < N; j++){
grid[i][j] = 0;
new_grid[i][j] = 0;
}
}
}
void geracao_inicial(){
//GLIDER
int lin = 1, col = 1;
grid[lin ][col+1] = 1;
grid[lin+1][col+2] = 1;
grid[lin+2][col ] = 1;
grid[lin+2][col+1] = 1;
grid[lin+2][col+2] = 1;
//R-pentomino
lin =10; col = 30;
grid[lin ][col+1] = 1;
grid[lin ][col+2] = 1;
grid[lin+1][col ] = 1;
grid[lin+1][col+1] = 1;
grid[lin+2][col+1] = 1;
}
int getNeighbors(int table[N][N], int i, int j){
int numAliveNeighbors = 0;
// Up
if(i != 0){
if(table[i - 1][j] == 1){
numAliveNeighbors++;
}
}else{
if(table[N - 1][j] == 1){
numAliveNeighbors++;
}
}
// Down
if(table[(i + 1)%N][j] == 1){
numAliveNeighbors++;
}
// Left
if(j != 0){
if(table[i][j - 1] == 1){
numAliveNeighbors++;
}
}else{
if(table[i][N - 1] == 1){
numAliveNeighbors++;
}
}
// Right
if(table[i][(j + 1)%N] == 1){
numAliveNeighbors++;
}
// Upper-Right Corner
if((i == 0) && (j == N - 1)){
if(table[N - 1][0] == 1){
numAliveNeighbors++;
}
}else{
// i!=0 || j != n-1
if(i == 0){
// já sabemos que j != N - 1
if(table[N - 1][j + 1] == 1){
numAliveNeighbors++;
}
}else{// i != 0
if(j == N - 1){
if(table[i - 1][0] == 1){
numAliveNeighbors++;
}
}else{
if(table[i - 1][j + 1] == 1){
numAliveNeighbors++;
}
}
}
}
// Lower-Right Corner
if(table[(i + 1)%N][(j + 1)%N] == 1){
numAliveNeighbors++;
}
// Upper-Left Corner
if((i == 0) && (j == 0)){
if(table[N - 1][N - 1] == 1){
numAliveNeighbors++;
}
}else{
// i!=0 || j != 0
if(i == 0){
// já sabemos que j != 0
if(table[N - 1][j -1] == 1){
numAliveNeighbors++;
}
}else{// i != 0
if(j == 0){
if(table[i - 1][N - 1] == 1){
numAliveNeighbors++;
}
}else{
if(table[i - 1][j - 1] == 1){
numAliveNeighbors++;
}
}
}
}
// Lower-Left Corner
if((i == N - 1) && (j == 0)){
if(table[0][N - 1] == 1){
numAliveNeighbors++;
}
}else{
// i!=n-1 || j != 0
if(i == N - 1){
// já sabemos que j != 0
if(table[0][j - 1] == 1){
numAliveNeighbors++;
}
}else{// i != n-1
if(j == 0){
if(table[i + 1][N - 1] == 1){
numAliveNeighbors++;
}
}else{
if(table[i + 1][j - 1] == 1){
numAliveNeighbors++;
}
}
}
}
return numAliveNeighbors;
}
void game_of_life(){
int i;
int j;
#pragma omp parallel for collapse(2)
for (i = 0; i < N; i++){
for (j = 0; j < N; j++){
//aplicar as regras do jogo da vida
//celulas vivas com menos de 2 vizinhas vivas morrem
if(grid[i][j] == 1 && getNeighbors(grid, i, j) < 2){
new_grid[i][j] = 0;
}
//célula viva com 2 ou 3 vizinhos deve permanecer viva para a próxima geração
else if (grid[i][j] == 1 && getNeighbors(grid, i, j) == 2 || getNeighbors(grid, i, j) == 3){
new_grid[i][j] = 1;
}
//célula viva com 4 ou mais vizinhos morre por superpopulação
else if (grid[i][j] == 1 && getNeighbors(grid, i, j) >= 4){
new_grid[i][j] = 0;
}
//morta com exatamente 3 vizinhos deve se tornar viva
else if (grid[i][j] == 0 && getNeighbors(grid, i, j) == 3){
new_grid[i][j] = 1;
}
}
}
//passar a nova geração para atual
#pragma omp parallel for collapse(2)
for (i = 0; i < N; i++){
for (j = 0; j < N; j++){
grid[i][j] = new_grid[i][j];
}
}
}
int count_LiveCells(){
int i;
int j;
int cont = 0;
#pragma omp parallel for collapse (2) reduction(+ : cont)
for (i = 0; i < N; i++){
for (j = 0; j < N; j++){
if (grid[i][j] == 1){
cont++;
}
}
}
return cont;
}
int main (){
int i, j;
int var;
int vida;
int cont = 0;
double start;
double end;
omp_set_num_threads(cores);
inicia_grids_zero();
geracao_inicial();
start = omp_get_wtime ();
for (vida = 0; vida < itera_max; vida++){
/*
for (i = 0; i < N; i++){
for (j = 0; j < N; j++){
if (grid[i][j] == 1){
printf("\033[1;31m");
printf("%d", grid[i][j]);
printf("\033[0m");
}
else{
printf("%d", grid[i][j]);
}
}
printf("\n");
}*/
//printf("VIVOS: %d\n", count_LiveCells());
game_of_life();
//getchar(); //para fazer o for esperar por um enter
}
end = omp_get_wtime();
cont = count_LiveCells ();
printf("VIVOS: %d\n", cont);
printf("CORES: %d\n", cores);
printf("TEMPO: %.2f\n", end - start);
/*
for (i = 0; i < N; i++){
for (j = 0; j < N; j++){
printf("%d", grid[i][j]);
}
printf("\n");
}
*/
return 0;
} |
convolution_3x3_pack4to1_bf16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd64_pack4to1_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 64, inch, 16u, elempack, opt.workspace_allocator);
conv3x3s1_winograd64_transform_input_pack4_bf16s_neon(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
// permute
// bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
#if __aarch64__
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + tiles % 12 % 4, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator);
#else
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 64, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, 4u * elempack, elempack, opt.workspace_allocator);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
float* tm2p = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v16.4s, v17.4s, v18.4s, v19.4s}, [%0] \n"
"sub %0, %0, #128 \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
"st1 {v19.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19");
r0 += bottom_blob_tm.cstep * 4;
}
}
#endif
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8);
#else
float* tm2p = tm2.row(i / 8);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n"
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0] \n"
"sub %0, %0, #64 \n"
"st1 {v0.4s}, [%1], #16 \n"
"st1 {v4.4s}, [%1], #16 \n"
"st1 {v1.4s}, [%1], #16 \n"
"st1 {v5.4s}, [%1], #16 \n"
"st1 {v2.4s}, [%1], #16 \n"
"st1 {v6.4s}, [%1], #16 \n"
"st1 {v3.4s}, [%1], #16 \n"
"st1 {v7.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.f32 {d0-d3}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d4-d7}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d16-d19}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d20-d23}, [%0 :128] \n"
"sub %0, %0, #96 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vswp d17, d20 \n"
"vswp d19, d22 \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
"vst1.f32 {d16-d17}, [%1 :128]! \n"
"vst1.f32 {d4-d5}, [%1 :128]! \n"
"vst1.f32 {d20-d21}, [%1 :128]! \n"
"vst1.f32 {d2-d3}, [%1 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"vst1.f32 {d6-d7}, [%1 :128]! \n"
"vst1.f32 {d22-d23}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
#endif
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n"
"st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #256] \n"
"vld4.f32 {d0-d3}, [%0 :128]! \n"
"pld [%0, #256] \n"
"vld4.f32 {d4-d7}, [%0 :128] \n"
"sub %0, %0, #32 \n"
"vswp d1, d4 \n"
"vswp d3, d6 \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
"vst1.f32 {d4-d5}, [%1 :128]! \n"
"vst1.f32 {d2-d3}, [%1 :128]! \n"
"vst1.f32 {d6-d7}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
for (; i < tiles; i++)
{
#if __aarch64__
float* tm2p = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
#else
float* tm2p = tm2.row(i / 8 + (i % 8) / 4 + i % 4);
#endif
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 4;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.4s}, [%0] \n"
"st1 {v0.4s}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.f32 {d0-d1}, [%0 :128] \n"
"vst1.f32 {d0-d1}, [%1 :128]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 4;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, 4u, 1, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
#if __aarch64__
nn_outch = outch >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
float* output4_tm = top_blob_tm.channel(p + 4);
float* output5_tm = top_blob_tm.channel(p + 5);
float* output6_tm = top_blob_tm.channel(p + 6);
float* output7_tm = top_blob_tm.channel(p + 7);
const Mat kernel01_tm = kernel_tm.channel(p / 8);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v23.4s, v0.4s, v5.s[1] \n"
"fmla v26.4s, v0.4s, v5.s[2] \n"
"fmla v29.4s, v0.4s, v5.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v21.4s, v1.4s, v5.s[0] \n"
"fmla v24.4s, v1.4s, v5.s[1] \n"
"fmla v27.4s, v1.4s, v5.s[2] \n"
"fmla v30.4s, v1.4s, v5.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"fmla v22.4s, v2.4s, v5.s[0] \n"
"fmla v25.4s, v2.4s, v5.s[1] \n"
"fmla v28.4s, v2.4s, v5.s[2] \n"
"fmla v31.4s, v2.4s, v5.s[3] \n"
"fmla v8.4s, v3.4s, v6.s[0] \n"
"fmla v11.4s, v3.4s, v6.s[1] \n"
"fmla v14.4s, v3.4s, v6.s[2] \n"
"fmla v17.4s, v3.4s, v6.s[3] \n"
"fmla v20.4s, v3.4s, v7.s[0] \n"
"fmla v23.4s, v3.4s, v7.s[1] \n"
"fmla v26.4s, v3.4s, v7.s[2] \n"
"fmla v29.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v9.4s, v0.4s, v6.s[0] \n"
"fmla v12.4s, v0.4s, v6.s[1] \n"
"fmla v15.4s, v0.4s, v6.s[2] \n"
"fmla v18.4s, v0.4s, v6.s[3] \n"
"fmla v21.4s, v0.4s, v7.s[0] \n"
"fmla v24.4s, v0.4s, v7.s[1] \n"
"fmla v27.4s, v0.4s, v7.s[2] \n"
"fmla v30.4s, v0.4s, v7.s[3] \n"
"fmla v10.4s, v1.4s, v6.s[0] \n"
"fmla v13.4s, v1.4s, v6.s[1] \n"
"fmla v16.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v22.4s, v1.4s, v7.s[0] \n"
"fmla v25.4s, v1.4s, v7.s[1] \n"
"fmla v28.4s, v1.4s, v7.s[2] \n"
"fmla v31.4s, v1.4s, v7.s[3] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"fmla v8.4s, v2.4s, v4.s[0] \n"
"fmla v11.4s, v2.4s, v4.s[1] \n"
"fmla v14.4s, v2.4s, v4.s[2] \n"
"fmla v17.4s, v2.4s, v4.s[3] \n"
"fmla v20.4s, v2.4s, v5.s[0] \n"
"fmla v23.4s, v2.4s, v5.s[1] \n"
"fmla v26.4s, v2.4s, v5.s[2] \n"
"fmla v29.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v4.s[0] \n"
"fmla v12.4s, v3.4s, v4.s[1] \n"
"fmla v15.4s, v3.4s, v4.s[2] \n"
"fmla v18.4s, v3.4s, v4.s[3] \n"
"fmla v21.4s, v3.4s, v5.s[0] \n"
"fmla v24.4s, v3.4s, v5.s[1] \n"
"fmla v27.4s, v3.4s, v5.s[2] \n"
"fmla v30.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v10.4s, v0.4s, v4.s[0] \n"
"fmla v13.4s, v0.4s, v4.s[1] \n"
"fmla v16.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v22.4s, v0.4s, v5.s[0] \n"
"fmla v25.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v31.4s, v0.4s, v5.s[3] \n"
"fmla v8.4s, v1.4s, v6.s[0] \n"
"fmla v11.4s, v1.4s, v6.s[1] \n"
"fmla v14.4s, v1.4s, v6.s[2] \n"
"fmla v17.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v23.4s, v1.4s, v7.s[1] \n"
"fmla v26.4s, v1.4s, v7.s[2] \n"
"fmla v29.4s, v1.4s, v7.s[3] \n"
"fmla v9.4s, v2.4s, v6.s[0] \n"
"fmla v12.4s, v2.4s, v6.s[1] \n"
"fmla v15.4s, v2.4s, v6.s[2] \n"
"fmla v18.4s, v2.4s, v6.s[3] \n"
"fmla v21.4s, v2.4s, v7.s[0] \n"
"fmla v24.4s, v2.4s, v7.s[1] \n"
"fmla v27.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"fmla v10.4s, v3.4s, v6.s[0] \n"
"fmla v13.4s, v3.4s, v6.s[1] \n"
"fmla v16.4s, v3.4s, v6.s[2] \n"
"fmla v19.4s, v3.4s, v6.s[3] \n"
"fmla v22.4s, v3.4s, v7.s[0] \n"
"fmla v25.4s, v3.4s, v7.s[1] \n"
"fmla v28.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n"
"st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n"
"st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n"
"st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n"
"st1 {v20.4s, v21.4s, v22.4s}, [%5], #48 \n"
"st1 {v23.4s, v24.4s, v25.4s}, [%6], #48 \n"
"st1 {v26.4s, v27.4s, v28.4s}, [%7], #48 \n"
"st1 {v29.4s, v30.4s, v31.4s}, [%8], #48 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v18.4s, v0.4s, v4.s[1] \n"
"fmla v20.4s, v0.4s, v4.s[2] \n"
"fmla v22.4s, v0.4s, v4.s[3] \n"
"fmla v24.4s, v0.4s, v5.s[0] \n"
"fmla v26.4s, v0.4s, v5.s[1] \n"
"fmla v28.4s, v0.4s, v5.s[2] \n"
"fmla v30.4s, v0.4s, v5.s[3] \n"
"fmla v17.4s, v1.4s, v4.s[0] \n"
"fmla v19.4s, v1.4s, v4.s[1] \n"
"fmla v21.4s, v1.4s, v4.s[2] \n"
"fmla v23.4s, v1.4s, v4.s[3] \n"
"fmla v25.4s, v1.4s, v5.s[0] \n"
"fmla v27.4s, v1.4s, v5.s[1] \n"
"fmla v29.4s, v1.4s, v5.s[2] \n"
"fmla v31.4s, v1.4s, v5.s[3] \n"
"fmla v16.4s, v2.4s, v6.s[0] \n"
"fmla v18.4s, v2.4s, v6.s[1] \n"
"fmla v20.4s, v2.4s, v6.s[2] \n"
"fmla v22.4s, v2.4s, v6.s[3] \n"
"fmla v24.4s, v2.4s, v7.s[0] \n"
"fmla v26.4s, v2.4s, v7.s[1] \n"
"fmla v28.4s, v2.4s, v7.s[2] \n"
"fmla v30.4s, v2.4s, v7.s[3] \n"
"fmla v17.4s, v3.4s, v6.s[0] \n"
"fmla v19.4s, v3.4s, v6.s[1] \n"
"fmla v21.4s, v3.4s, v6.s[2] \n"
"fmla v23.4s, v3.4s, v6.s[3] \n"
"fmla v25.4s, v3.4s, v7.s[0] \n"
"fmla v27.4s, v3.4s, v7.s[1] \n"
"fmla v29.4s, v3.4s, v7.s[2] \n"
"fmla v31.4s, v3.4s, v7.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n"
"fmla v16.4s, v12.4s, v8.s[0] \n"
"fmla v18.4s, v12.4s, v8.s[1] \n"
"fmla v20.4s, v12.4s, v8.s[2] \n"
"fmla v22.4s, v12.4s, v8.s[3] \n"
"fmla v24.4s, v12.4s, v9.s[0] \n"
"fmla v26.4s, v12.4s, v9.s[1] \n"
"fmla v28.4s, v12.4s, v9.s[2] \n"
"fmla v30.4s, v12.4s, v9.s[3] \n"
"fmla v17.4s, v13.4s, v8.s[0] \n"
"fmla v19.4s, v13.4s, v8.s[1] \n"
"fmla v21.4s, v13.4s, v8.s[2] \n"
"fmla v23.4s, v13.4s, v8.s[3] \n"
"fmla v25.4s, v13.4s, v9.s[0] \n"
"fmla v27.4s, v13.4s, v9.s[1] \n"
"fmla v29.4s, v13.4s, v9.s[2] \n"
"fmla v31.4s, v13.4s, v9.s[3] \n"
"fmla v16.4s, v14.4s, v10.s[0] \n"
"fmla v18.4s, v14.4s, v10.s[1] \n"
"fmla v20.4s, v14.4s, v10.s[2] \n"
"fmla v22.4s, v14.4s, v10.s[3] \n"
"fmla v24.4s, v14.4s, v11.s[0] \n"
"fmla v26.4s, v14.4s, v11.s[1] \n"
"fmla v28.4s, v14.4s, v11.s[2] \n"
"fmla v30.4s, v14.4s, v11.s[3] \n"
"fmla v17.4s, v15.4s, v10.s[0] \n"
"fmla v19.4s, v15.4s, v10.s[1] \n"
"fmla v21.4s, v15.4s, v10.s[2] \n"
"fmla v23.4s, v15.4s, v10.s[3] \n"
"fmla v25.4s, v15.4s, v11.s[0] \n"
"fmla v27.4s, v15.4s, v11.s[1] \n"
"fmla v29.4s, v15.4s, v11.s[2] \n"
"fmla v31.4s, v15.4s, v11.s[3] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
"st1 {v20.4s, v21.4s}, [%3], #32 \n"
"st1 {v22.4s, v23.4s}, [%4], #32 \n"
"st1 {v24.4s, v25.4s}, [%5], #32 \n"
"st1 {v26.4s, v27.4s}, [%6], #32 \n"
"st1 {v28.4s, v29.4s}, [%7], #32 \n"
"st1 {v30.4s, v31.4s}, [%8], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v0.4s, v4.s[0] \n"
"fmla v17.4s, v0.4s, v4.s[1] \n"
"fmla v18.4s, v0.4s, v4.s[2] \n"
"fmla v19.4s, v0.4s, v4.s[3] \n"
"fmla v20.4s, v0.4s, v5.s[0] \n"
"fmla v21.4s, v0.4s, v5.s[1] \n"
"fmla v22.4s, v0.4s, v5.s[2] \n"
"fmla v23.4s, v0.4s, v5.s[3] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n"
"fmla v16.4s, v1.4s, v6.s[0] \n"
"fmla v17.4s, v1.4s, v6.s[1] \n"
"fmla v18.4s, v1.4s, v6.s[2] \n"
"fmla v19.4s, v1.4s, v6.s[3] \n"
"fmla v20.4s, v1.4s, v7.s[0] \n"
"fmla v21.4s, v1.4s, v7.s[1] \n"
"fmla v22.4s, v1.4s, v7.s[2] \n"
"fmla v23.4s, v1.4s, v7.s[3] \n"
"fmla v16.4s, v2.4s, v8.s[0] \n"
"fmla v17.4s, v2.4s, v8.s[1] \n"
"fmla v18.4s, v2.4s, v8.s[2] \n"
"fmla v19.4s, v2.4s, v8.s[3] \n"
"fmla v20.4s, v2.4s, v9.s[0] \n"
"fmla v21.4s, v2.4s, v9.s[1] \n"
"fmla v22.4s, v2.4s, v9.s[2] \n"
"fmla v23.4s, v2.4s, v9.s[3] \n"
"fmla v16.4s, v3.4s, v10.s[0] \n"
"fmla v17.4s, v3.4s, v10.s[1] \n"
"fmla v18.4s, v3.4s, v10.s[2] \n"
"fmla v19.4s, v3.4s, v10.s[3] \n"
"fmla v20.4s, v3.4s, v11.s[0] \n"
"fmla v21.4s, v3.4s, v11.s[1] \n"
"fmla v22.4s, v3.4s, v11.s[2] \n"
"fmla v23.4s, v3.4s, v11.s[3] \n"
"bne 0b \n"
"st1 {v16.4s}, [%1], #16 \n"
"st1 {v17.4s}, [%2], #16 \n"
"st1 {v18.4s}, [%3], #16 \n"
"st1 {v19.4s}, [%4], #16 \n"
"st1 {v20.4s}, [%5], #16 \n"
"st1 {v21.4s}, [%6], #16 \n"
"st1 {v22.4s}, [%7], #16 \n"
"st1 {v23.4s}, [%8], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #128] \n"
"ld1 {v0.4s}, [%9], #16 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%10], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v16.4s, v4.4s, v0.s[0] \n"
"fmla v17.4s, v5.4s, v0.s[0] \n"
"fmla v18.4s, v6.4s, v0.s[1] \n"
"fmla v19.4s, v7.4s, v0.s[1] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%10], #64 \n"
"fmla v16.4s, v8.4s, v0.s[2] \n"
"fmla v17.4s, v9.4s, v0.s[2] \n"
"fmla v18.4s, v10.4s, v0.s[3] \n"
"fmla v19.4s, v11.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"st1 {v16.s}[0], [%1], #4 \n"
"st1 {v16.s}[1], [%2], #4 \n"
"st1 {v16.s}[2], [%3], #4 \n"
"st1 {v16.s}[3], [%4], #4 \n"
"st1 {v17.s}[0], [%5], #4 \n"
"st1 {v17.s}[1], [%6], #4 \n"
"st1 {v17.s}[2], [%7], #4 \n"
"st1 {v17.s}[3], [%8], #4 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19");
}
}
}
remain_outch_start += nn_outch << 3;
nn_outch = (outch - remain_outch_start) >> 2;
#else // __aarch64__
nn_outch = outch >> 2;
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* output0_tm = top_blob_tm.channel(p);
float* output1_tm = top_blob_tm.channel(p + 1);
float* output2_tm = top_blob_tm.channel(p + 2);
float* output3_tm = top_blob_tm.channel(p + 3);
#if __aarch64__
const Mat kernel01_tm = kernel_tm.channel(p / 8 + (p % 8) / 4);
#else
const Mat kernel01_tm = kernel_tm.channel(p / 4);
#endif
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v11.4s, v0.4s, v4.s[1] \n"
"fmla v14.4s, v0.4s, v4.s[2] \n"
"fmla v17.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v12.4s, v1.4s, v4.s[1] \n"
"fmla v15.4s, v1.4s, v4.s[2] \n"
"fmla v18.4s, v1.4s, v4.s[3] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"fmla v13.4s, v2.4s, v4.s[1] \n"
"fmla v16.4s, v2.4s, v4.s[2] \n"
"fmla v19.4s, v2.4s, v4.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%5], #64 \n"
"fmla v8.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v14.4s, v3.4s, v5.s[2] \n"
"fmla v17.4s, v3.4s, v5.s[3] \n"
"fmla v9.4s, v20.4s, v5.s[0] \n"
"fmla v12.4s, v20.4s, v5.s[1] \n"
"fmla v15.4s, v20.4s, v5.s[2] \n"
"fmla v18.4s, v20.4s, v5.s[3] \n"
"fmla v10.4s, v21.4s, v5.s[0] \n"
"fmla v13.4s, v21.4s, v5.s[1] \n"
"fmla v16.4s, v21.4s, v5.s[2] \n"
"fmla v19.4s, v21.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%5], #64 \n"
"fmla v8.4s, v22.4s, v6.s[0] \n"
"fmla v11.4s, v22.4s, v6.s[1] \n"
"fmla v14.4s, v22.4s, v6.s[2] \n"
"fmla v17.4s, v22.4s, v6.s[3] \n"
"fmla v9.4s, v23.4s, v6.s[0] \n"
"fmla v12.4s, v23.4s, v6.s[1] \n"
"fmla v15.4s, v23.4s, v6.s[2] \n"
"fmla v18.4s, v23.4s, v6.s[3] \n"
"fmla v10.4s, v24.4s, v6.s[0] \n"
"fmla v13.4s, v24.4s, v6.s[1] \n"
"fmla v16.4s, v24.4s, v6.s[2] \n"
"fmla v19.4s, v24.4s, v6.s[3] \n"
"fmla v8.4s, v25.4s, v7.s[0] \n"
"fmla v11.4s, v25.4s, v7.s[1] \n"
"fmla v14.4s, v25.4s, v7.s[2] \n"
"fmla v17.4s, v25.4s, v7.s[3] \n"
"fmla v9.4s, v26.4s, v7.s[0] \n"
"fmla v12.4s, v26.4s, v7.s[1] \n"
"fmla v15.4s, v26.4s, v7.s[2] \n"
"fmla v18.4s, v26.4s, v7.s[3] \n"
"fmla v10.4s, v27.4s, v7.s[0] \n"
"fmla v13.4s, v27.4s, v7.s[1] \n"
"fmla v16.4s, v27.4s, v7.s[2] \n"
"fmla v19.4s, v27.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n"
"st1 {v11.4s, v12.4s, v13.4s}, [%2], #48 \n"
"st1 {v14.4s, v15.4s, v16.4s}, [%3], #48 \n"
"st1 {v17.4s, v18.4s, v19.4s}, [%4], #48 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27");
}
#endif // __aarch64__
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
#else
const float* r0 = bb2.row(i / 8);
#endif
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v10.4s, v0.4s, v4.s[1] \n"
"fmla v12.4s, v0.4s, v4.s[2] \n"
"fmla v14.4s, v0.4s, v4.s[3] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v11.4s, v1.4s, v4.s[1] \n"
"fmla v13.4s, v1.4s, v4.s[2] \n"
"fmla v15.4s, v1.4s, v4.s[3] \n"
"fmla v8.4s, v2.4s, v5.s[0] \n"
"fmla v10.4s, v2.4s, v5.s[1] \n"
"fmla v12.4s, v2.4s, v5.s[2] \n"
"fmla v14.4s, v2.4s, v5.s[3] \n"
"fmla v9.4s, v3.4s, v5.s[0] \n"
"fmla v11.4s, v3.4s, v5.s[1] \n"
"fmla v13.4s, v3.4s, v5.s[2] \n"
"fmla v15.4s, v3.4s, v5.s[3] \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%5], #64 \n"
"fmla v8.4s, v16.4s, v6.s[0] \n"
"fmla v10.4s, v16.4s, v6.s[1] \n"
"fmla v12.4s, v16.4s, v6.s[2] \n"
"fmla v14.4s, v16.4s, v6.s[3] \n"
"fmla v9.4s, v17.4s, v6.s[0] \n"
"fmla v11.4s, v17.4s, v6.s[1] \n"
"fmla v13.4s, v17.4s, v6.s[2] \n"
"fmla v15.4s, v17.4s, v6.s[3] \n"
"fmla v8.4s, v18.4s, v7.s[0] \n"
"fmla v10.4s, v18.4s, v7.s[1] \n"
"fmla v12.4s, v18.4s, v7.s[2] \n"
"fmla v14.4s, v18.4s, v7.s[3] \n"
"fmla v9.4s, v19.4s, v7.s[0] \n"
"fmla v11.4s, v19.4s, v7.s[1] \n"
"fmla v13.4s, v19.4s, v7.s[2] \n"
"fmla v15.4s, v19.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
"st1 {v10.4s, v11.4s}, [%2], #32 \n"
"st1 {v12.4s, v13.4s}, [%3], #32 \n"
"st1 {v14.4s, v15.4s}, [%4], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"veor q12, q12 \n"
"veor q13, q13 \n"
"veor q14, q14 \n"
"veor q15, q15 \n"
"0: \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
"pld [%6, #512] \n"
"vldm %6!, {d8-d15} \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q10, q0, d8[1] \n"
"vmla.f32 q12, q0, d9[0] \n"
"vmla.f32 q14, q0, d9[1] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q11, q1, d8[1] \n"
"vmla.f32 q13, q1, d9[0] \n"
"vmla.f32 q15, q1, d9[1] \n"
"vmla.f32 q8, q2, d10[0] \n"
"vmla.f32 q10, q2, d10[1] \n"
"vmla.f32 q12, q2, d11[0] \n"
"vmla.f32 q14, q2, d11[1] \n"
"vmla.f32 q9, q3, d10[0] \n"
"vmla.f32 q11, q3, d10[1] \n"
"vmla.f32 q13, q3, d11[0] \n"
"vmla.f32 q15, q3, d11[1] \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
"vmla.f32 q8, q0, d12[0] \n"
"vmla.f32 q10, q0, d12[1] \n"
"vmla.f32 q12, q0, d13[0] \n"
"vmla.f32 q14, q0, d13[1] \n"
"vmla.f32 q9, q1, d12[0] \n"
"vmla.f32 q11, q1, d12[1] \n"
"vmla.f32 q13, q1, d13[0] \n"
"vmla.f32 q15, q1, d13[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d14[0] \n"
"vmla.f32 q10, q2, d14[1] \n"
"vmla.f32 q12, q2, d15[0] \n"
"vmla.f32 q14, q2, d15[1] \n"
"vmla.f32 q9, q3, d14[0] \n"
"vmla.f32 q11, q3, d14[1] \n"
"vmla.f32 q13, q3, d15[0] \n"
"vmla.f32 q15, q3, d15[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d19}, [%1]! \n"
"vst1.f32 {d20-d23}, [%2]! \n"
"vst1.f32 {d24-d27}, [%3]! \n"
"vst1.f32 {d28-d31}, [%4]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#endif
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v0.4s, v4.s[1] \n"
"fmla v10.4s, v0.4s, v4.s[2] \n"
"fmla v11.4s, v0.4s, v4.s[3] \n"
"fmla v8.4s, v1.4s, v5.s[0] \n"
"fmla v9.4s, v1.4s, v5.s[1] \n"
"fmla v10.4s, v1.4s, v5.s[2] \n"
"fmla v11.4s, v1.4s, v5.s[3] \n"
"fmla v8.4s, v2.4s, v6.s[0] \n"
"fmla v9.4s, v2.4s, v6.s[1] \n"
"fmla v10.4s, v2.4s, v6.s[2] \n"
"fmla v11.4s, v2.4s, v6.s[3] \n"
"fmla v8.4s, v3.4s, v7.s[0] \n"
"fmla v9.4s, v3.4s, v7.s[1] \n"
"fmla v10.4s, v3.4s, v7.s[2] \n"
"fmla v11.4s, v3.4s, v7.s[3] \n"
"bne 0b \n"
"st1 {v8.4s}, [%1], #16 \n"
"st1 {v9.4s}, [%2], #16 \n"
"st1 {v10.4s}, [%3], #16 \n"
"st1 {v11.4s}, [%4], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
"pld [%6, #512] \n"
"vldm %6!, {d8-d15} \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q0, d8[1] \n"
"vmla.f32 q10, q0, d9[0] \n"
"vmla.f32 q11, q0, d9[1] \n"
"vmla.f32 q8, q1, d10[0] \n"
"vmla.f32 q9, q1, d10[1] \n"
"vmla.f32 q10, q1, d11[0] \n"
"vmla.f32 q11, q1, d11[1] \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q2, d12[0] \n"
"vmla.f32 q9, q2, d12[1] \n"
"vmla.f32 q10, q2, d13[0] \n"
"vmla.f32 q11, q2, d13[1] \n"
"vmla.f32 q8, q3, d14[0] \n"
"vmla.f32 q9, q3, d14[1] \n"
"vmla.f32 q10, q3, d15[0] \n"
"vmla.f32 q11, q3, d15[1] \n"
"bne 0b \n"
"vst1.f32 {d16-d17}, [%1]! \n"
"vst1.f32 {d18-d19}, [%2]! \n"
"vst1.f32 {d20-d21}, [%3]! \n"
"vst1.f32 {d22-d23}, [%4]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < tiles; i++)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
#endif
const float* kptr = kernel01_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"prfm pldl1keep, [%6, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%6], #64 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[1] \n"
"fmla v10.4s, v6.4s, v0.s[2] \n"
"fmla v11.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"st1 {v8.s}[0], [%1], #4 \n"
"st1 {v8.s}[1], [%2], #4 \n"
"st1 {v8.s}[2], [%3], #4 \n"
"st1 {v8.s}[3], [%4], #4 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5]! \n"
"pld [%6, #512] \n"
"vldm %6!, {d8-d15} \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[1] \n"
"vmla.f32 q10, q6, d1[0] \n"
"vmla.f32 q11, q7, d1[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vst1.f32 {d16[0]}, [%1]! \n"
"vst1.f32 {d16[1]}, [%2]! \n"
"vst1.f32 {d17[0]}, [%3]! \n"
"vst1.f32 {d17[1]}, [%4]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(r0), // %5
"=r"(kptr) // %6
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(r0),
"6"(kptr)
: "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
#if __aarch64__
const Mat kernel0_tm = kernel_tm.channel(p / 8 + (p % 8) / 4 + p % 4);
#else
const Mat kernel0_tm = kernel_tm.channel(p / 4 + p % 4);
#endif
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* kptr = kernel0_tm.row(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v5.16b, v5.16b, v5.16b \n"
"eor v6.16b, v6.16b, v6.16b \n"
"eor v7.16b, v7.16b, v7.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3], #16 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[0] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n"
"fmla v5.4s, v3.4s, v4.s[1] \n"
"fmla v6.4s, v12.4s, v4.s[1] \n"
"fmla v7.4s, v13.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%2], #64 \n"
"fmla v8.4s, v14.4s, v4.s[2] \n"
"fmla v9.4s, v15.4s, v4.s[2] \n"
"fmla v10.4s, v16.4s, v4.s[2] \n"
"fmla v5.4s, v17.4s, v4.s[3] \n"
"fmla v6.4s, v18.4s, v4.s[3] \n"
"fmla v7.4s, v19.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v5.4s \n"
"fadd v9.4s, v9.4s, v6.4s \n"
"fadd v10.4s, v10.4s, v7.4s \n"
"st1 {v8.4s, v9.4s, v10.4s}, [%1], #48 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
#endif
for (; i + 7 < tiles; i += 8)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
#else
const float* r0 = bb2.row(i / 8);
#endif
const float* kptr = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3], #16 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[0] \n"
"fmla v10.4s, v2.4s, v4.s[1] \n"
"fmla v11.4s, v3.4s, v4.s[1] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%2], #64 \n"
"fmla v8.4s, v12.4s, v4.s[2] \n"
"fmla v9.4s, v13.4s, v4.s[2] \n"
"fmla v10.4s, v14.4s, v4.s[3] \n"
"fmla v11.4s, v15.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v9.4s, v9.4s, v11.4s \n"
"st1 {v8.4s, v9.4s}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #128] \n"
"vld1.f32 {d8-d9}, [%3]! \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[0] \n"
"vmla.f32 q10, q2, d8[1] \n"
"vmla.f32 q11, q3, d8[1] \n"
"pld [%2, #512] \n"
"vldm %2!, {d24-d31} \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q12, d9[0] \n"
"vmla.f32 q9, q13, d9[0] \n"
"vmla.f32 q10, q14, d9[1] \n"
"vmla.f32 q11, q15, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q9, q9, q11 \n"
"vst1.f32 {d16-d19}, [%1]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
#endif // __aarch64__
}
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4);
#endif
const float* kptr = kernel0_tm.row(r);
int nn = inch; // inch always > 0
#if __aarch64__
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n"
"prfm pldl1keep, [%3, #128] \n"
"ld1 {v4.4s}, [%3], #16 \n"
"subs %w0, %w0, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"st1 {v8.4s}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11");
#else // __aarch64__
asm volatile(
"veor q8, q8 \n"
"veor q9, q9 \n"
"veor q10, q10 \n"
"veor q11, q11 \n"
"0: \n"
"pld [%2, #512] \n"
"vldm %2!, {d0-d7} \n"
"pld [%3, #128] \n"
"vld1.f32 {d8-d9}, [%3]! \n"
"subs %0, %0, #1 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vst1.f32 {d16-d17}, [%1]! \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(r0), // %2
"=r"(kptr) // %3
: "0"(nn),
"1"(output0_tm),
"2"(r0),
"3"(kptr)
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11");
#endif // __aarch64__
}
for (; i < tiles; i++)
{
#if __aarch64__
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + i % 12 % 4);
#else
const float* r0 = bb2.row(i / 8 + (i % 8) / 4 + i % 4);
#endif
const float* kptr = kernel0_tm.row(r);
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (int q = 0; q < inch; q++)
{
float32x4_t _r0 = vld1q_f32(r0);
float32x4_t _k0 = vld1q_f32(kptr);
_sum0 = vmlaq_f32(_sum0, _r0, _k0);
kptr += 4;
r0 += 4;
}
#if __aarch64__
float sum0 = vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float32x2_t _ss2 = vpadd_f32(_ss, _ss);
float sum0 = vget_lane_f32(_ss2, 0);
#endif
output0_tm[0] = sum0;
output0_tm++;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 2u, 1, opt.workspace_allocator);
}
{
conv3x3s1_winograd64_transform_output_bf16s_neon(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
merge.c | #include "merge.h"
/*void merging_BFT(char* prefix_bft1, char* prefix_bft2, char* output_prefix, int cut_lvl, bool packed_in_subtries){
ASSERT_NULL_PTR(prefix_bft1, "merging_BFT()\n")
ASSERT_NULL_PTR(prefix_bft2, "merging_BFT()\n")
UC* uc = NULL;
CC* cc = NULL;
Node* node = NULL;
BFT** bfts_insert;
BFT* bft1 = l_read_BFT_Root(prefix_bft1, cut_lvl);
BFT* bft2 = l_read_BFT_Root(prefix_bft2, cut_lvl);
bool overlap_genome_ids = are_genomes_ids_overlapping(bft1, bft2);
int nb_ccs_bft1 = 0;
int lvl_root = (bft1->k / NB_CHAR_SUF_PREF) - 1;
int nb_bytes = CEIL(bft1->k * 2, SIZE_BITS_UINT_8T);
int len_filename_cc = strlen(prefix_bft1);
int len_output_filename = strlen(output_prefix);
int old_nb_genome_ids_bft = bft1->nb_genomes;
int i, j, nb_threads, thread_id;
int len_filename_container;
int len_output_filename_cont;
int nb_skp;
int* presence_bf = get_bf_presence_per_cc(bft1);
BFT_annotation** bft_annot;
FILE** files_cc;
FILE* file_tmp;
uint8_t** kmer_comp;
uint8_t** kmer_comp_cpy;
char** new_filename;
char** output_filename = (char**) malloc(sizeof(char*));
ASSERT_NULL_PTR(output_filename, "merging_BFT() 4\n");
output_filename[0] = (char*) malloc((len_output_filename + 30) * sizeof(char));
ASSERT_NULL_PTR(output_filename[0], "merging_BFT() 4\n");
strcpy(output_filename[0], output_prefix);
strcpy(&(output_filename[0][len_output_filename]), "_c");
if (bft1->node.CC_array != NULL){
do {nb_ccs_bft1++;}
while (IS_EVEN(((CC*)bft1->node.CC_array)[nb_ccs_bft1-1].type));
}
files_cc = (FILE**) malloc((nb_ccs_bft1 + 1) * sizeof(FILE*));
ASSERT_NULL_PTR(files_cc, "merging_BFT() 6\n");
for (j = 0; j <= nb_ccs_bft1; j++){
sprintf(&(output_filename[0][len_output_filename+2]), "%d", j);
files_cc[j] = fopen(output_filename[0], "w");
ASSERT_NULL_PTR(files_cc[j], "merging_BFT() 7\n");
}
free(output_filename[0]);
free(output_filename);
l_iterate_over_kmers(bft2, cut_lvl, packed_in_subtries, prefix_bft2, l_insert_kmer, nb_bytes, files_cc, presence_bf, nb_ccs_bft1);
for (j = 0; j <= nb_ccs_bft1; j++) fclose(files_cc[j]);
if (overlap_genome_ids) add_genomes_BFT_Root(bft2->nb_genomes - 1, &(bft2->filenames[1]), bft1);
else add_genomes_BFT_Root(bft2->nb_genomes, bft2->filenames, bft1);
if (bft1->node.CC_array != NULL){
#pragma omp parallel \
shared(bft1, nb_ccs_bft1, new_filename, output_filename, len_filename_cc, nb_bytes, kmer_comp, kmer_comp_cpy, files_cc, \
bft_annot, old_nb_genome_ids_bft, overlap_genome_ids, lvl_root, nb_threads, bfts_insert) \
private(i, j, uc, cc, node, nb_skp, file_tmp, len_filename_container, len_output_filename_cont, thread_id)
{
#pragma omp single
{
nb_threads = omp_get_num_threads();
bfts_insert = (BFT**) malloc(nb_threads * sizeof(BFT*));
ASSERT_NULL_PTR(bfts_insert, "merging_BFT() 3\n");
new_filename = (char**) malloc(nb_threads * sizeof(char*));
ASSERT_NULL_PTR(new_filename, "merging_BFT() 3\n");
output_filename = (char**) malloc(nb_threads * sizeof(char*));
ASSERT_NULL_PTR(output_filename, "merging_BFT() 3\n");
kmer_comp = (uint8_t**) malloc(nb_threads * sizeof(uint8_t*));
ASSERT_NULL_PTR(kmer_comp, "merging_BFT() 3\n");
kmer_comp_cpy = (uint8_t**) malloc(nb_threads * sizeof(uint8_t*));
ASSERT_NULL_PTR(kmer_comp_cpy, "merging_BFT() 3\n");
bft_annot = (BFT_annotation**) malloc(nb_threads * sizeof(BFT_annotation*));
ASSERT_NULL_PTR(bft_annot, "merging_BFT() 3\n");
for (i = 0; i < nb_threads; i++){
bfts_insert[i] = copy_BFT_Root(bft1);
kmer_comp[i] = (uint8_t*) malloc(nb_bytes * sizeof(uint8_t));
ASSERT_NULL_PTR(kmer_comp[i], "merging_BFT() 1\n");
kmer_comp_cpy[i] = (uint8_t*) malloc(nb_bytes * sizeof(uint8_t));
ASSERT_NULL_PTR(kmer_comp_cpy[i], "merging_BFT() 1\n");
new_filename[i] = (char*) malloc((len_filename_cc + 30) * sizeof(char));
ASSERT_NULL_PTR(new_filename[i], "merging_BFT() 3\n");
strcpy(new_filename[i], prefix_bft1);
new_filename[i][len_filename_cc] = '_';
output_filename[i] = (char*) malloc((len_output_filename + 30) * sizeof(char));
ASSERT_NULL_PTR(output_filename[i], "merging_BFT() 3\n");
strcpy(output_filename[i], output_prefix);
output_filename[i][len_output_filename] = '_';
bft_annot[i] = create_BFT_annotation();
bft_annot[i]->annot = (uint8_t*) calloc(SIZE_MAX_BYTE_ANNOT, sizeof(uint8_t));
ASSERT_NULL_PTR(bft_annot[i]->annot, "merging_BFT() 5\n");
}
}
#pragma omp for
for (i = 0; i < nb_ccs_bft1; i++){
thread_id = omp_get_thread_num();
cc = &(((CC*)bfts_insert[thread_id]->node.CC_array)[i]);
nb_skp = CEIL(cc->nb_elem, bfts_insert[thread_id]->info_per_lvl[lvl_root].nb_ucs_skp);
sprintf(&(new_filename[thread_id][len_filename_cc+1]), "%d", i);
len_filename_container = strlen(new_filename[thread_id]);
if (!packed_in_subtries) new_filename[thread_id][len_filename_container] = '_';
output_filename[thread_id][len_output_filename+1] = 'c';
sprintf(&(output_filename[thread_id][len_output_filename+2]), "%d", i);
files_cc[i] = fopen(output_filename[thread_id], "r");
ASSERT_NULL_PTR(files_cc[i], "merging_BFT() 8\n");
sprintf(&(output_filename[thread_id][len_output_filename+1]), "%d", i);
len_output_filename_cont = strlen(output_filename[thread_id]);
if (!packed_in_subtries) output_filename[thread_id][len_output_filename_cont] = '_';
else{
file_tmp = fopen(new_filename[thread_id], "r");
ASSERT_NULL_PTR(file_tmp, "merging_BFT() 9\n");
}
for (j = 0; j < nb_skp; j++){
uc = &(((UC*)cc->children)[j]);
if (!packed_in_subtries){
sprintf(&(new_filename[thread_id][len_filename_container+1]), "%d", j);
file_tmp = fopen(new_filename[thread_id], "r");
ASSERT_NULL_PTR(file_tmp, "merging_BFT() 9.1\n");
}
if (fread(&(uc->nb_children), sizeof(uint16_t), 1, file_tmp) != 1) ERROR("merging_BFT() 10\n")
if (lvl_root){
read_UC_sparse(uc, bfts_insert[thread_id]->ann_inf, file_tmp,
bfts_insert[thread_id]->info_per_lvl[lvl_root].size_kmer_in_bytes_minus_1, uc->nb_children);
}
else if (j != nb_skp-1){
read_UC_sparse(uc, bfts_insert[thread_id]->ann_inf, file_tmp, 0,
bfts_insert[thread_id]->info_per_lvl[lvl_root].nb_ucs_skp);
}
else {
read_UC_sparse(uc, bfts_insert[thread_id]->ann_inf, file_tmp, 0,
cc->nb_elem - j * bfts_insert[thread_id]->info_per_lvl[lvl_root].nb_ucs_skp);
}
if (!packed_in_subtries) fclose(file_tmp);
}
for (j = 0; j < cc->nb_Node_children; j++){
node = &(cc->children_Node_container[j]);
if (!packed_in_subtries){
sprintf(&(new_filename[thread_id][len_filename_container+1]), "%d", j + nb_skp);
file_tmp = fopen(new_filename[thread_id], "r");
ASSERT_NULL_PTR(file_tmp, "merging_BFT() 11\n");
}
l_read_Node(node, bfts_insert[thread_id], lvl_root-1, cut_lvl, file_tmp, new_filename[thread_id],
bfts_insert[thread_id]->k - NB_CHAR_SUF_PREF);
if (!packed_in_subtries) fclose(file_tmp);
}
while (fread(kmer_comp[thread_id], sizeof(uint8_t), nb_bytes, files_cc[i]) == nb_bytes){
if (fread(&(bft_annot[thread_id]->size_annot), sizeof(int), 1, files_cc[i]) != 1) ERROR("merging_BFT() 13\n")
if (fread(bft_annot[thread_id]->annot, sizeof(uint8_t), bft_annot[thread_id]->size_annot, files_cc[i])
!= bft_annot[thread_id]->size_annot) ERROR("merging_BFT() 14\n")
l_insert_kmer_bis(bfts_insert[thread_id], lvl_root, kmer_comp[thread_id], kmer_comp_cpy[thread_id],
nb_bytes, old_nb_genome_ids_bft, overlap_genome_ids, bft_annot[thread_id], i);
memset(bft_annot[thread_id]->annot, 0, bft_annot[thread_id]->size_annot * sizeof(uint8_t));
}
fclose(files_cc[i]);
if (packed_in_subtries){
fclose(file_tmp);
file_tmp = fopen(output_filename[thread_id], "w");
ASSERT_NULL_PTR(file_tmp, "merging_BFT() 15\n");
}
cc = &(((CC*)bfts_insert[thread_id]->node.CC_array)[i]);
nb_skp = CEIL(cc->nb_elem, bfts_insert[thread_id]->info_per_lvl[lvl_root].nb_ucs_skp);
if (i < nb_ccs_bft1 - 1){
for (j = 0; j < nb_skp; j++){
uc = &(((UC*)cc->children)[j]);
if (!packed_in_subtries){
sprintf(&(output_filename[thread_id][len_output_filename_cont+1]), "%d", j);
file_tmp = fopen(output_filename[thread_id], "w");
ASSERT_NULL_PTR(file_tmp, "merging_BFT() 15\n");
}
if (lvl_root){
write_UC_sparse(uc, bfts_insert[thread_id], file_tmp,
bfts_insert[thread_id]->info_per_lvl[lvl_root].size_kmer_in_bytes_minus_1,
uc->nb_children, true);
}
else if (j != nb_skp-1){
write_UC_sparse(uc, bfts_insert[thread_id], file_tmp, 0,
bfts_insert[thread_id]->info_per_lvl[lvl_root].nb_ucs_skp, false);
}
else{
write_UC_sparse(uc, bfts_insert[thread_id], file_tmp, 0,
cc->nb_elem - j * bfts_insert[thread_id]->info_per_lvl[lvl_root].nb_ucs_skp, false);
}
if (!packed_in_subtries) fclose(file_tmp);
if (uc->suffixes != NULL) free(uc->suffixes);
}
for (j = 0; j < cc->nb_Node_children; j++){
node = &(cc->children_Node_container[j]);
if (!packed_in_subtries){
sprintf(&(output_filename[thread_id][len_output_filename_cont+1]), "%d", j + nb_skp);
file_tmp = fopen(output_filename[thread_id], "w");
ASSERT_NULL_PTR(file_tmp, "merging_BFT() 16\n");
}
l_write_Node(node, bfts_insert[thread_id], lvl_root-1, bfts_insert[thread_id]->k - NB_CHAR_SUF_PREF,
cut_lvl, true, file_tmp, output_filename[thread_id]);
if (!packed_in_subtries) fclose(file_tmp);
freeNode(node, lvl_root-1, bfts_insert[thread_id]->info_per_lvl);
}
if (packed_in_subtries) fclose(file_tmp);
//printf("Merging CC %d finished\n", i);
}
}
#pragma omp single
{
for (i = 1; i < nb_threads; i++){
free(kmer_comp[i]);
free(kmer_comp_cpy[i]);
free(new_filename[i]);
free(output_filename[i]);
free(bft_annot[i]->annot);
free(bft_annot[i]);
}
}
}
output_filename[0][len_output_filename+1] = 'c';
sprintf(&(output_filename[0][len_output_filename+2]), "%d", nb_ccs_bft1);
files_cc[nb_ccs_bft1] = fopen(output_filename[0], "r");
ASSERT_NULL_PTR(files_cc[nb_ccs_bft1], "merging_BFT() 17\n");
i = nb_ccs_bft1 - 1;
while (fread(kmer_comp[0], sizeof(uint8_t), nb_bytes, files_cc[nb_ccs_bft1]) == nb_bytes){
if (fread(&(bft_annot[0]->size_annot), sizeof(int), 1, files_cc[nb_ccs_bft1]) != 1) ERROR("merging_BFT() 19\n")
if (fread(bft_annot[0]->annot, sizeof(uint8_t), bft_annot[0]->size_annot, files_cc[nb_ccs_bft1])
!= bft_annot[0]->size_annot) ERROR("merging_BFT() 20\n")
l_insert_kmer_bis(bft1, lvl_root, kmer_comp[0], kmer_comp_cpy[0], nb_bytes, old_nb_genome_ids_bft,
overlap_genome_ids, bft_annot[0], i);
memset(bft_annot[0]->annot, 0, bft_annot[0]->size_annot * sizeof(uint8_t));
}
fclose(files_cc[nb_ccs_bft1]);
do {
cc = &(((CC*)bft1->node.CC_array)[i]);
nb_skp = CEIL(cc->nb_elem, bft1->info_per_lvl[lvl_root].nb_ucs_skp);
sprintf(&(output_filename[0][len_output_filename+1]), "%d", i);
len_output_filename_cont = strlen(output_filename[0]);
if (!packed_in_subtries) output_filename[0][len_output_filename_cont] = '_';
else{
file_tmp = fopen(output_filename[0], "w");
ASSERT_NULL_PTR(file_tmp, "merging_BFT() 9\n");
}
for (j = 0; j < nb_skp; j++){
uc = &(((UC*)cc->children)[j]);
if (!packed_in_subtries){
sprintf(&(output_filename[0][len_output_filename_cont+1]), "%d", j);
file_tmp = fopen(output_filename[0], "w");
ASSERT_NULL_PTR(file_tmp, "merging_BFT() 21\n");
}
if (lvl_root){
write_UC_sparse(uc, bft1, file_tmp, bft1->info_per_lvl[lvl_root].size_kmer_in_bytes_minus_1,
uc->nb_children, true);
}
else if (j != nb_skp-1) write_UC_sparse(uc, bft1, file_tmp, 0, bft1->info_per_lvl[lvl_root].nb_ucs_skp, false);
else write_UC_sparse(uc, bft1, file_tmp, 0, cc->nb_elem - j * bft1->info_per_lvl[lvl_root].nb_ucs_skp, false);
if (!packed_in_subtries) fclose(file_tmp);
if (uc->suffixes != NULL) free(uc->suffixes);
}
for (j = 0; j < cc->nb_Node_children; j++){
node = &(cc->children_Node_container[j]);
if (!packed_in_subtries){
sprintf(&(output_filename[0][len_output_filename_cont+1]), "%d", j + nb_skp);
file_tmp = fopen(output_filename[0], "w");
ASSERT_NULL_PTR(file_tmp, "merging_BFT() 22\n");
}
l_write_Node(node, bft1, lvl_root-1, bft1->k - NB_CHAR_SUF_PREF, cut_lvl, true, file_tmp,
output_filename[0]);
if (!packed_in_subtries) fclose(file_tmp);
freeNode(node, lvl_root-1, bft1->info_per_lvl);
}
if (packed_in_subtries) fclose(file_tmp);
i++;
}
while (IS_EVEN(((CC*)bft1->node.CC_array)[i-1].type));
l_write_BFT_Root(bft1, output_prefix, cut_lvl, false);
}
free(kmer_comp[0]);
free(kmer_comp_cpy[0]);
free(bft_annot[0]->annot);
free(bft_annot[0]);
free(new_filename[0]);
free(output_filename[0]);
free(kmer_comp);
free(kmer_comp_cpy);
free(bft_annot);
free(new_filename);
free(output_filename);
free(presence_bf);
free(files_cc);
}
// Visitor callback (BFT iteration signature): serializes one k-mer and its
// color annotation to the temp file of the CC (compressed container) that the
// k-mer's 11-char prefix maps to, for a later merge pass.
// va_list layout: int nb_bytes (compressed k-mer size), FILE** files_cc
// (one temp file per CC + 1 overflow), int* presence_bf (prefix -> CC index,
// -1 if absent), int nb_ccs (index of the overflow file).
// Returns 1 so the BFT traversal continues.
size_t l_insert_kmer(BFT_kmer* kmer, BFT* graph, va_list args){
uint32_t substring_prefix;
uint8_t kmer_cpy[SIZE_BYTES_SUF_PREF];
int id_file;
// Unpack the variadic context passed by the traversal driver.
int nb_bytes = va_arg(args, int);
FILE** files_cc = va_arg(args, FILE**);
int* presence_bf = va_arg(args, int*);
int nb_ccs = va_arg(args, int);
BFT_annotation* bft_annot = get_annotation(kmer);
// Flag value 0b11 in the first annotation byte: the annotation is stored as a
// pointer (7-bit varint "position") into the compressed color-set array, not inline.
if ((bft_annot->annot[0] & 0x3) == 3){
uint32_t position = bft_annot->annot[0] >> 2;
int i = 0;
// Continuation bytes: low bit set means "more bits follow"; each byte
// contributes 7 payload bits.
// NOTE(review): at i == 0 the shift amount is 6+(0-1)*7 == -1, which is
// undefined behavior for <<; the loop was likely meant to start at i = 1
// since annot[0]'s payload is already in `position` — verify upstream.
while ((i < bft_annot->size_annot) && (IS_ODD(bft_annot->annot[i]))){
position |= ((uint32_t)(bft_annot->annot[i] >> 1)) << (6+(i-1)*7);
i++;
}
// Varint may spill one byte into the extension area.
// NOTE(review): the shift uses (i + size_annot - 1) although i already
// equals size_annot here, i.e. (2*size_annot - 1)*7 + 6 — looks like the
// size is double-counted; confirm against the encoder.
if ((i >= bft_annot->size_annot) && (bft_annot->annot_ext != NULL)){
if (IS_ODD(bft_annot->annot_ext[0])){
position |= ((uint32_t)(bft_annot->annot_ext[0] >> 1)) << (6 + (i + bft_annot->size_annot - 1) * 7);
i++;
}
}
// Materialize the referenced color set, decompressing it if needed.
bft_annot->annot = extract_from_annotation_array_elem(graph->comp_set_colors, position, &bft_annot->size_annot);
i = decomp_annotation(graph->ann_inf, bft_annot->annot, bft_annot->size_annot, NULL, 0, false);
if (i != 0){
bft_annot->annot = graph->ann_inf->annotation;
bft_annot->size_annot = i;
}
bft_annot->annot_ext = NULL;
}
// Rebuild the 18-bit (2 bytes + 2 bits) prefix in traversal bit order to
// index the prefix -> CC lookup table.
kmer_cpy[0] = reverse_word_8(kmer->kmer_comp[0]);
kmer_cpy[1] = reverse_word_8(kmer->kmer_comp[1]);
kmer_cpy[2] = reverse_word_8(kmer->kmer_comp[2]) & 0xc0;
substring_prefix = (kmer_cpy[0] << 10) | (kmer_cpy[1] << 2) | (kmer_cpy[2] >> 6);
// Prefix absent from the other tree: route to the shared overflow file.
if (presence_bf[substring_prefix] == -1) id_file = nb_ccs;
else id_file = presence_bf[substring_prefix];
// Record format: compressed k-mer, annotation length (int), annotation bytes.
// NOTE(review): the error messages say "l_insert_kmer2()" although this
// function is l_insert_kmer — probably stale copy/paste in the literal.
if (fwrite(kmer->kmer_comp, sizeof(uint8_t), nb_bytes, files_cc[id_file]) != nb_bytes) ERROR("l_insert_kmer2()\n")
if (bft_annot->annot_ext == NULL){
if (fwrite(&(bft_annot->size_annot), sizeof(int), 1, files_cc[id_file]) != 1) ERROR("l_insert_kmer2()\n")
if (fwrite(bft_annot->annot, sizeof(uint8_t), bft_annot->size_annot, files_cc[id_file]) != bft_annot->size_annot) ERROR("l_insert_kmer2()\n")
}
else{
// Extension byte present: advertise size+1 in the header, then write the
// base annotation followed by the single extension byte.
bft_annot->size_annot++;
if (fwrite(&(bft_annot->size_annot), sizeof(int), 1, files_cc[id_file]) != 1) ERROR("l_insert_kmer2()\n")
bft_annot->size_annot--;
if (fwrite(bft_annot->annot, sizeof(uint8_t), bft_annot->size_annot, files_cc[id_file]) != bft_annot->size_annot) ERROR("l_insert_kmer2()\n")
if (fwrite(bft_annot->annot_ext, sizeof(uint8_t), 1, files_cc[id_file]) != 1) ERROR("l_insert_kmer2()\n")
}
// Scrub the shared decompression scratch buffer for the next k-mer.
memset(graph->ann_inf->annotation, 0, graph->ann_inf->size_annot * sizeof(uint8_t));
graph->ann_inf->size_annot = 0;
free_BFT_annotation(bft_annot);
return 1;
}
// Inserts one k-mer (read back from a merge temp file) into `bft`, adding
// every genome id carried by `bft_annot` to the k-mer's color annotation.
// Genome ids are shifted by old_nb_genome_ids_bft (minus 1 when the id spaces
// overlap). kmer_comp_cpy is caller-provided scratch: lookups/inserts consume
// the buffer, so it is re-copied from kmer_comp before each trie call.
// pos_start_search == -1 means "search from the trie root" (position 0).
void l_insert_kmer_bis(BFT* bft, int lvl_root, uint8_t* kmer_comp, uint8_t* kmer_comp_cpy, int nb_bytes, int old_nb_genome_ids_bft,
bool overlap_genome_ids, BFT_annotation* bft_annot, int pos_start_search){
uint32_t id;
uint32_t size_id;
uint32_t pow2_imin = 0;
uint8_t* annot;
uint8_t* annot_ext;
uint8_t* annot_cplx;
int size_annot;
int size_annot_cplx;
resultPresence* res;
UC* uc;
// genome_ids[0] is the count; actual ids start at index 1.
uint32_t i = 1;
uint32_t* genome_ids = get_list_id_genomes(bft_annot, bft);
memcpy(kmer_comp_cpy, kmer_comp, nb_bytes * sizeof(uint8_t));
res = isKmerPresent(&(bft->node), bft, lvl_root, kmer_comp_cpy, bft->k);
// K-mer not yet in the tree: insert it with the first genome id, then
// re-resolve its location so the remaining ids can be appended in place.
// NOTE(review): genome_ids[1] is read before checking genome_ids[0] != 0 —
// an empty id list would read past the count; confirm get_list_id_genomes()
// never returns an empty list here.
if (res->link_child == NULL){
id = genome_ids[i] + old_nb_genome_ids_bft;
if (overlap_genome_ids) id--;
// Cache the annotation byte-width per power-of-two bracket: recompute
// size_id only when id crosses the current bracket (pow2_imin starts at 0,
// so the first iteration always initializes both).
if (id >= pow2_imin){
pow2_imin = round_up_next_highest_power2(id);
size_id = get_nb_bytes_power2_annot_bis(id, pow2_imin);
}
memcpy(kmer_comp_cpy, kmer_comp, nb_bytes * sizeof(uint8_t));
if (pos_start_search == -1) insertKmer_Node(&(bft->node), bft, lvl_root, kmer_comp_cpy, bft->k, kmer_comp, id, size_id, 0);
else insertKmer_Node(&(bft->node), bft, lvl_root, kmer_comp_cpy, bft->k, kmer_comp, id, size_id, pos_start_search);
i++;
// More ids to add: locate the freshly inserted k-mer.
if (i <= genome_ids[0]){
free(res);
memcpy(kmer_comp_cpy, kmer_comp, nb_bytes * sizeof(uint8_t));
res = isKmerPresent(&(bft->node), bft, lvl_root, kmer_comp_cpy, bft->k);
}
}
// Append each remaining genome id to the existing annotation in place.
for (; i <= genome_ids[0]; i++){
id = genome_ids[i] + old_nb_genome_ids_bft;
if (overlap_genome_ids) id--;
if (id >= pow2_imin){
pow2_imin = round_up_next_highest_power2(id);
size_id = get_nb_bytes_power2_annot_bis(id, pow2_imin);
}
// posFilter2 != 0: the hit lies in a UC (uncompressed container);
// otherwise it is inside a CC's child UC selected by res->bucket.
if (res->posFilter2 != 0){
uc = (UC*)res->container;
get_annot(uc, &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx,
res->posFilter2, res->posFilter3, res->pos_sub_bucket);
}
else{
uc = &(((UC*)((CC*)res->container)->children)[res->bucket]);
get_annot(uc, &annot, &annot_ext, &annot_cplx, &size_annot, &size_annot_cplx,
res->posFilter2, res->posFilter3, res->pos_sub_bucket);
}
// Choose the cheapest annotation encoding for annot + id.
compute_best_mode(bft->ann_inf, bft->comp_set_colors, annot, size_annot, annot_ext, 1, id, size_id);
// Skip if this id was the one most recently appended (dedup).
if (bft->ann_inf->last_added != id){
// Grow the annotation slot if the chosen encoding no longer fits
// (an existing extension byte grants one extra byte of slack).
if (bft->ann_inf->min_size > uc->size_annot){
if ((annot_ext == NULL) || (bft->ann_inf->min_size > uc->size_annot+1))
annot_ext = realloc_annotation(uc, res->posFilter2, res->posFilter3, bft->ann_inf->min_size, 0, res->pos_sub_bucket);
}
// Rewrite the annotation bytes at the k-mer's suffix slot.
modify_mode_annotation(bft->ann_inf, &(uc->suffixes[res->pos_sub_bucket * (res->posFilter2 + uc->size_annot) + res->posFilter2]),
uc->size_annot, annot_ext, 1, id, size_id);
// A zeroed extension byte means it became unused: reclaim it.
if ((annot_ext != NULL) && (annot_ext[0] == 0))
delete_extend_annots(uc, res->posFilter2, res->posFilter3, res->pos_sub_bucket, res->pos_sub_bucket, 0, 0, 1);
}
reinit_annotation_inform(bft->ann_inf);
}
free(res);
free(genome_ids);
return;
}
*/
|
convolution_sgemm_pack1to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// int8 GEMM micro-kernel for convolution: computes
//   top_blob(outch, size) += kernel x bottom_im2col
// where bottom_im2col holds one int8 per element (pack = 1) and the int32
// results are stored with 4 output channels interleaved (pack = 4).
// `kernel` must be pre-interleaved by
// convolution_im2col_sgemm_transform_kernel_pack1to4_int8_sse (4a-4b tiles).
static void im2col_sgemm_pack1to4_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
// Runtime ISA dispatch: if this TU was compiled without a newer instruction
// set that the running CPU supports, forward to the specialization that was
// compiled in a separate TU with that ISA enabled.
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
extern void im2col_sgemm_pack1to4_int8_sse_avx512vnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_pack1to4_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
extern void im2col_sgemm_pack1to4_int8_sse_avxvnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_pack1to4_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_AVX2 && __AVX__ && !__AVX2__
if (ncnn::cpu_support_x86_avx2())
{
extern void im2col_sgemm_pack1to4_int8_sse_avx2(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_pack1to4_int8_sse_avx2(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_XOP && __SSE2__ && !__XOP__
if (ncnn::cpu_support_x86_xop())
{
extern void im2col_sgemm_pack1to4_int8_sse_xop(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_pack1to4_int8_sse_xop(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
// Mat bottom_im2col(size, maxk, inch, 8u, 8, opt.workspace_allocator);
// size = output pixels, maxk = kernel taps, inch/outch = channel counts.
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
// Repack the im2col matrix into tile-major `tmp` so the GEMM inner loops can
// read contiguously: column tiles of width 4 (AVX2 builds only), then 2,
// then 1. When inch >= 4, groups of 4 input channels are interleaved inside
// each tile (elempack 4); leftover channels stay un-interleaved.
Mat tmp;
if (inch >= 4)
{
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
#endif
}
else
{
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
#endif
}
{
#if __AVX2__
// Width-4 column tiles (AVX2 only): layout is ch0..ch3 for column i, then
// columns i+1..i+3, per kernel tap.
int remain_size_start = 0;
int nn_size = size >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
signed char* tmpptr = tmp.channel(i / 4);
int q = 0;
// Interleave 4 input channels at a time.
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr[8] = img0[2];
tmpptr[9] = img1[2];
tmpptr[10] = img2[2];
tmpptr[11] = img3[2];
tmpptr[12] = img0[3];
tmpptr[13] = img1[3];
tmpptr[14] = img2[3];
tmpptr[15] = img3[3];
tmpptr += 16;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
// Remaining input channels, copied one channel at a time.
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += size;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#endif
// Width-2 column tiles.
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
// Tile index differs between builds because the AVX2 layout also holds
// width-4 tiles ahead of the width-2 ones.
#if __AVX2__
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
signed char* tmpptr = tmp.channel(i / 2);
#endif
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
// Leftover single columns.
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __AVX2__
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr += 4;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
// GEMM: for each group of 4 output channels (top_blob channel p, pack = 4),
// accumulate int32 dot products of the repacked columns with the interleaved
// kernel. nn4 = number of 4-input-channel interleaved steps, nn1 = leftover
// input channels (handled scalar-broadcast, 4 weights per step).
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __AVX2__
// Process 4 output columns per iteration.
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
__m256i _sum00_12 = _mm256_setzero_si256();
__m256i _sum20_32 = _mm256_setzero_si256();
if (nn4 > 0)
{
#if __AVXVNNI__ || __AVX512VNNI__
__m256i _sum10_02 = _mm256_setzero_si256();
__m256i _sum30_22 = _mm256_setzero_si256();
#else
__m256i _sum10_02 = _mm256_setzero_si256();
__m256i _sum01_13 = _mm256_setzero_si256();
__m256i _sum11_03 = _mm256_setzero_si256();
__m256i _sum30_22 = _mm256_setzero_si256();
__m256i _sum21_33 = _mm256_setzero_si256();
__m256i _sum31_23 = _mm256_setzero_si256();
#endif
int j = 0;
for (; j < nn4; j++)
{
// Load 4 columns x 4 channels of int8 data, widen to int16, and
// duplicate each column pair across the 128-bit lanes; 78 ==
// _MM_SHUFFLE(1,0,3,2), i.e. swap the two column halves.
__m128i _val0123 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val0123_16 = _mm256_cvtepi8_epi16(_val0123);
__m256i _val01_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(1, 1, 0, 0));
__m256i _val23_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(3, 3, 2, 2));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
__m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
// VNNI: fused 16-bit multiply + pairwise add + int32 accumulate.
_sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16);
_sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16);
_sum20_32 = _mm256_dpwssd_epi32(_sum20_32, _val23_16, _w01_16);
_sum30_22 = _mm256_dpwssd_epi32(_sum30_22, _val32_16, _w01_16);
#else
// Portable AVX2: build 32-bit products from the low/high halves of a
// 16-bit multiply, accumulate lo/hi unpacks separately.
__m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
__m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
__m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
__m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
__m256i _sl20_31 = _mm256_mullo_epi16(_val23_16, _w01_16);
__m256i _sh20_31 = _mm256_mulhi_epi16(_val23_16, _w01_16);
__m256i _sl30_21 = _mm256_mullo_epi16(_val32_16, _w01_16);
__m256i _sh30_21 = _mm256_mulhi_epi16(_val32_16, _w01_16);
_sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
_sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
_sum01_13 = _mm256_add_epi32(_sum01_13, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
_sum11_03 = _mm256_add_epi32(_sum11_03, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
_sum20_32 = _mm256_add_epi32(_sum20_32, _mm256_unpacklo_epi16(_sl20_31, _sh20_31));
_sum30_22 = _mm256_add_epi32(_sum30_22, _mm256_unpacklo_epi16(_sl30_21, _sh30_21));
_sum21_33 = _mm256_add_epi32(_sum21_33, _mm256_unpackhi_epi16(_sl20_31, _sh20_31));
_sum31_23 = _mm256_add_epi32(_sum31_23, _mm256_unpackhi_epi16(_sl30_21, _sh30_21));
#endif
tmpptr += 16;
kptr0 += 16;
}
// Reduce partial sums so each 128-bit lane holds the 4 output-channel
// sums of one column (column order restored by the final permute).
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02);
_sum20_32 = _mm256_hadd_epi32(_sum20_32, _sum30_22);
_sum00_12 = _mm256_permute4x64_epi64(_sum00_12, _MM_SHUFFLE(2, 1, 3, 0));
_sum20_32 = _mm256_permute4x64_epi64(_sum20_32, _MM_SHUFFLE(2, 1, 3, 0));
#else
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_12, _sum10_02);
_tmp1 = _mm256_unpacklo_epi32(_sum01_13, _sum11_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_12, _sum10_02);
_tmp3 = _mm256_unpackhi_epi32(_sum01_13, _sum11_03);
_sum00_12 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_02 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum01_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum11_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum20_32, _sum30_22);
_tmp1 = _mm256_unpacklo_epi32(_sum21_33, _sum31_23);
_tmp2 = _mm256_unpackhi_epi32(_sum20_32, _sum30_22);
_tmp3 = _mm256_unpackhi_epi32(_sum21_33, _sum31_23);
_sum20_32 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum30_22 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum21_33 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum31_23 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00_12 = _mm256_add_epi32(_sum00_12, _sum10_02);
_sum01_13 = _mm256_add_epi32(_sum01_13, _sum11_03);
_sum00_12 = _mm256_add_epi32(_sum00_12, _sum01_13);
_sum20_32 = _mm256_add_epi32(_sum20_32, _sum30_22);
_sum21_33 = _mm256_add_epi32(_sum21_33, _sum31_23);
_sum20_32 = _mm256_add_epi32(_sum20_32, _sum21_33);
__m256i _perm_mask = _mm256_set_epi32(6, 4, 3, 1, 7, 5, 2, 0);
_sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask);
_sum20_32 = _mm256_permutevar8x32_epi32(_sum20_32, _perm_mask);
#endif
}
__m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0);
__m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1);
__m128i _sum20 = _mm256_extracti128_si256(_sum20_32, 0);
__m128i _sum30 = _mm256_extracti128_si256(_sum20_32, 1);
// Scalar tail over leftover input channels: broadcast each input value
// against the 4 per-output-channel weights.
int j = 0;
for (; j < nn1; j++)
{
__m128i _val01 = _mm_set_epi16(tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[0], tmpptr[0], tmpptr[0], tmpptr[0]);
__m128i _val23 = _mm_set_epi16(tmpptr[3], tmpptr[3], tmpptr[3], tmpptr[3], tmpptr[2], tmpptr[2], tmpptr[2], tmpptr[2]);
__m128i _w0123 = _mm_set_epi16(kptr0[3], kptr0[2], kptr0[1], kptr0[0], kptr0[3], kptr0[2], kptr0[1], kptr0[0]);
__m128i _sl00 = _mm_mullo_epi16(_val01, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val01, _w0123);
__m128i _sl10 = _mm_mullo_epi16(_val23, _w0123);
__m128i _sh10 = _mm_mulhi_epi16(_val23, _w0123);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00));
_sum20 = _mm_add_epi32(_sum20, _mm_unpacklo_epi16(_sl10, _sh10));
_sum30 = _mm_add_epi32(_sum30, _mm_unpackhi_epi16(_sl10, _sh10));
tmpptr += 4;
kptr0 += 4;
}
// Store 4 columns x 4 interleaved output channels.
_mm_storeu_si128((__m128i*)outptr0, _sum00);
_mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10);
_mm_storeu_si128((__m128i*)(outptr0 + 8), _sum20);
_mm_storeu_si128((__m128i*)(outptr0 + 12), _sum30);
outptr0 += 16;
}
#endif
// Process 2 output columns per iteration.
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
#if __AVX2__
__m256i _sum00_12 = _mm256_setzero_si256();
#else
__m128i _sum00 = _mm_setzero_si128();
__m128i _sum10 = _mm_setzero_si128();
#endif
if (nn4 > 0)
{
#if __AVX2__
#if __AVXVNNI__ || __AVX512VNNI__
__m256i _sum10_02 = _mm256_setzero_si256();
#else
__m256i _sum10_02 = _mm256_setzero_si256();
__m256i _sum01_13 = _mm256_setzero_si256();
__m256i _sum11_03 = _mm256_setzero_si256();
#endif
#else
#if __XOP__
__m128i _sum01 = _mm_setzero_si128();
__m128i _sum11 = _mm_setzero_si128();
#else
__m128i _sum01 = _mm_setzero_si128();
__m128i _sum02 = _mm_setzero_si128();
__m128i _sum03 = _mm_setzero_si128();
__m128i _sum11 = _mm_setzero_si128();
__m128i _sum12 = _mm_setzero_si128();
__m128i _sum13 = _mm_setzero_si128();
#endif
#endif
int j = 0;
for (; j < nn4; j++)
{
#if __AVX2__
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
_val01_16 = _mm256_permute4x64_epi64(_val01_16, _MM_SHUFFLE(1, 1, 0, 0));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16);
_sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16);
#else
__m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
__m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
__m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
__m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
_sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
_sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
_sum01_13 = _mm256_add_epi32(_sum01_13, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
_sum11_03 = _mm256_add_epi32(_sum11_03, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
#endif
#else
// SSE path: sign-extend 8 int8 values to int16 (SSE4.1 has cvtepi8;
// plain SSE2 builds the sign half via a compare-and-unpack).
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
_val01 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
_val01 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
__m128i _val0 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _val1 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(3, 2, 3, 2));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
#if __XOP__
// XOP: fused multiply-add of 16-bit pairs into int32 accumulators.
_sum00 = _mm_maddd_epi16(_val0, _w0, _sum00);
_sum01 = _mm_maddd_epi16(_val0, _w1, _sum01);
_sum10 = _mm_maddd_epi16(_val1, _w0, _sum10);
_sum11 = _mm_maddd_epi16(_val1, _w1, _sum11);
#else
__m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
__m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
__m128i _sl10 = _mm_mullo_epi16(_val1, _w0);
__m128i _sh10 = _mm_mulhi_epi16(_val1, _w0);
__m128i _sl11 = _mm_mullo_epi16(_val1, _w1);
__m128i _sh11 = _mm_mulhi_epi16(_val1, _w1);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl00, _sh00));
_sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl01, _sh01));
_sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl01, _sh01));
_sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10));
_sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl10, _sh10));
_sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl11, _sh11));
_sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl11, _sh11));
#endif
#endif
tmpptr += 8;
kptr0 += 16;
}
// Horizontal reduction of the partial sums (per build flavor).
#if __AVX2__
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02);
_sum00_12 = _mm256_permute4x64_epi64(_sum00_12, _MM_SHUFFLE(2, 1, 3, 0));
#else
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_12, _sum10_02);
_tmp1 = _mm256_unpacklo_epi32(_sum01_13, _sum11_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_12, _sum10_02);
_tmp3 = _mm256_unpackhi_epi32(_sum01_13, _sum11_03);
_sum00_12 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_02 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum01_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum11_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00_12 = _mm256_add_epi32(_sum00_12, _sum10_02);
_sum01_13 = _mm256_add_epi32(_sum01_13, _sum11_03);
_sum00_12 = _mm256_add_epi32(_sum00_12, _sum01_13);
__m256i _perm_mask = _mm256_set_epi32(6, 4, 3, 1, 7, 5, 2, 0);
_sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask);
#endif
#else
#if __XOP__
_sum00 = _mm_hadd_epi32(_sum00, _sum01);
_sum10 = _mm_hadd_epi32(_sum10, _sum11);
#else
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
_tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
_tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
_tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
_sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
_tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
_tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
_tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
_sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00 = _mm_add_epi32(_sum00, _sum01);
_sum02 = _mm_add_epi32(_sum02, _sum03);
_sum10 = _mm_add_epi32(_sum10, _sum11);
_sum12 = _mm_add_epi32(_sum12, _sum13);
_sum00 = _mm_add_epi32(_sum00, _sum02);
_sum10 = _mm_add_epi32(_sum10, _sum12);
#endif
#endif
}
#if __AVX2__
__m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0);
__m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1);
#endif
// Scalar tail over leftover input channels for both columns.
int j = 0;
for (; j < nn1; j++)
{
__m128i _val = _mm_set_epi16(tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[0], tmpptr[0], tmpptr[0], tmpptr[0]);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
_w0123 = _mm_shuffle_epi32(_w0123, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _sl00 = _mm_mullo_epi16(_val, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val, _w0123);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00));
tmpptr += 2;
kptr0 += 4;
}
_mm_storeu_si128((__m128i*)outptr0, _sum00);
_mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10);
outptr0 += 8;
}
// Remaining single output columns.
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
__m128i _sum0 = _mm_setzero_si128();
if (nn4 > 0)
{
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
__m128i _val0 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
_val0 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
__m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl01, _sh01));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl01, _sh01));
tmpptr += 4;
kptr0 += 16;
}
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
_tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
_tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
_tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
_sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
_sum0 = _mm_add_epi32(_sum0, _sum2);
}
int j = 0;
for (; j < nn1; j++)
{
__m128i _val = _mm_set1_epi16(tmpptr[0]);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
__m128i _sl00 = _mm_mullo_epi16(_val, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val, _w0123);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
tmpptr += 1;
kptr0 += 4;
}
_mm_storeu_si128((__m128i*)outptr0, _sum0);
outptr0 += 4;
}
}
}
// Interleave the convolution weights for the pack1-to-pack4 int8 GEMM.
//   src layout: maxk-inch-outch
//   dst layout: 4a-4b-maxk-inch/4a-outch/4b
// i.e. per kernel tap a 4 (output channels) x 4 (input channels) tile,
// falling back to 4x1 strips for the leftover input channels.
static void convolution_im2col_sgemm_transform_kernel_pack1to4_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    Mat kernel = _kernel.reshape(maxk, inch, outch);

    // One destination channel per group of 4 output channels.
    if (inch >= 4)
        kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4, (size_t)1u);
    else
        kernel_tm.create(4 * maxk, inch, outch / 4, (size_t)1u);

    for (int oc = 0; oc + 3 < outch; oc += 4)
    {
        signed char* out = kernel_tm.channel(oc / 4);

        int ic = 0;

        // Full 4-input-channel groups: emit a 4x4 tile per kernel position,
        // output channel varying slowest within the tile.
        for (; ic + 3 < inch; ic += 4)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int a = 0; a < 4; a++)
                {
                    for (int b = 0; b < 4; b++)
                    {
                        *out++ = kernel.channel(oc + a).row<const signed char>(ic + b)[k];
                    }
                }
            }
        }

        // Leftover input channels: 4 output channels x 1 input channel.
        for (; ic < inch; ic++)
        {
            for (int k = 0; k < maxk; k++)
            {
                for (int a = 0; a < 4; a++)
                {
                    *out++ = kernel.channel(oc + a).row<const signed char>(ic)[k];
                }
            }
        }
    }
}
// int8 convolution via im2col + GEMM: unfolds bottom_blob into a
// (size x maxk x inch) column matrix, then multiplies by the pre-interleaved
// kernel with im2col_sgemm_pack1to4_int8_sse. Output is pack4 int32.
static void convolution_im2col_sgemm_pack1to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    const int w = bottom_blob.w;
    const int inch = bottom_blob.c;

    const int outw = top_blob.w;
    const int outh = top_blob.h;

    const int size = outw * outh;
    const int maxk = kernel_w * kernel_h;

    // im2col: row r of channel c holds, for kernel tap r, the input sample
    // under that tap for every output position of input channel c.
    Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
    {
        // Elements to skip to move from the end of one output row to the
        // start of the next in the input image.
        const int gap = w * stride_h - outw * stride_w;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < inch; p++)
        {
            const Mat img = bottom_blob.channel(p);
            signed char* out = bottom_im2col.channel(p);

            for (int ky = 0; ky < kernel_h; ky++)
            {
                for (int kx = 0; kx < kernel_w; kx++)
                {
                    const signed char* in = img.row<const signed char>(dilation_h * ky) + dilation_w * kx;

                    for (int y = 0; y < outh; y++)
                    {
                        // Copy one output row, unrolled by 4 then 2.
                        int x = 0;
                        for (; x + 3 < outw; x += 4)
                        {
                            out[0] = in[0];
                            out[1] = in[stride_w];
                            out[2] = in[stride_w * 2];
                            out[3] = in[stride_w * 3];

                            in += stride_w * 4;
                            out += 4;
                        }
                        for (; x + 1 < outw; x += 2)
                        {
                            out[0] = in[0];
                            out[1] = in[stride_w];

                            in += stride_w * 2;
                            out += 2;
                        }
                        for (; x < outw; x++)
                        {
                            out[0] = in[0];

                            in += stride_w;
                            out += 1;
                        }

                        in += gap;
                    }
                }
            }
        }
    }

    im2col_sgemm_pack1to4_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
|
2.c | // The Computer Language Benchmarks Game
// https://salsa.debian.org/benchmarksgame-team/benchmarksgame/
//
// Contributed by Jeremy Zerfas
// Based on the C++ program from Jon Harrop, Alex Mizrahi, and Bruno Coutinho.
#include <apr_pools.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
// intptr_t should be the native integer type on most sane systems.
typedef intptr_t intnative_t;
// A binary tree node. Leaf nodes are represented by both child pointers
// being NULL (apr_pcalloc() zero-fills newly allocated nodes).
typedef struct tree_node {
   struct tree_node *left_Node, *right_Node;
} tree_node;
// Create a binary tree of depth tree_Depth in memory_Pool and return a pointer
// to the created binary tree.
// Build a complete binary tree of the given depth inside memory_Pool and
// return its root node. A depth of zero yields a single leaf node.
static tree_node *create_Tree(const intnative_t tree_Depth,
  apr_pool_t *const memory_Pool) {
   tree_node *const node = apr_pcalloc(memory_Pool, sizeof(tree_node));

   // Leaf: keep the NULL children that apr_pcalloc() already provides.
   if (tree_Depth <= 0)
      return node;

   node->left_Node = create_Tree(tree_Depth - 1, memory_Pool);
   node->right_Node = create_Tree(tree_Depth - 1, memory_Pool);
   return node;
}
// Compute and return the checksum for the binary tree that has root_Node as the
// root node.
// Return the checksum of the tree rooted at root_Node: each node contributes
// one, so the checksum equals the node count.
static intnative_t compute_Tree_Checksum(const tree_node *const root_Node) {
   // A node missing either child is treated as a leaf and counts as 1.
   if (!root_Node->left_Node || !root_Node->right_Node)
      return 1;

   // Interior node: 1 plus the checksums of both subtrees.
   return 1 + compute_Tree_Checksum(root_Node->left_Node) +
      compute_Tree_Checksum(root_Node->right_Node);
}
int main(int argc, char *argv[]) {
// Set minimum_Tree_Depth to 4 and maximum_Tree_Depth to the maximum of what
// was specified as the argument to the program and minimum_Tree_Depth+2.
const intnative_t minimum_Tree_Depth = 4,
maximum_Tree_Depth = atoi(argv[1]) < minimum_Tree_Depth + 2
? minimum_Tree_Depth + 2
: atoi(argv[1]);
apr_initialize();
// Create a memory_Pool which will be used for storing both the stretch_Tree
// and the long_Lived_Tree.
apr_pool_t *memory_Pool;
apr_pool_create_unmanaged(&memory_Pool);
// Create a stretch_Tree of depth maximum_Tree_Depth+1, compute its
// checksum, and print its statistics. This work could be done in parallel
// along with all the other tree processing but APR memory pools aren't
// quite as streamlined as other memory pool implementations so it uses less
// resources to do this work by itself and then clear the memory_Pool so
// that most of the memory that was already allocated for the stretch_Tree
// can be reused for the upcoming long_Lived_Tree work rather than having
// APR allocate more memory for memory pools. Unfortunately since the
// long_Lived_Tree is about half the size of the stretch_Tree, this ends up
// wasting about half the memory that was being used by the stretch_Tree.
// APR subpools could be used to use that otherwise wasted memory for the
// processing of other trees that will be done later but it appears subpools
// only work with managed pools (even though APR's documentation for the
// apr_pool_create_unmanaged_ex() function seems to suggest that it possibly
// should work for unmanaged pools too) which are noticeably slower than
// unmanaged memory pools.
tree_node *stretch_Tree = create_Tree(maximum_Tree_Depth + 1, memory_Pool);
printf("stretch tree of depth %jd\t check: %jd\n",
(intmax_t)maximum_Tree_Depth + 1,
(intmax_t)compute_Tree_Checksum(stretch_Tree));
apr_pool_clear(memory_Pool);
// The long_Lived_Tree will be created in just a little bit simultaneously
// (assuming OpenMP was enabled and the program is running on a multi-
// processor system) while the rest of the trees are also being processed.
// long_Lived_Tree will store the reference to it which will remain valid
// until near the end of the program.
tree_node *long_Lived_Tree;
// These will be used to store checksums for the various trees so the
// statistics for the various trees can be output in the correct order
// later.
intnative_t long_Lived_Tree_Checksum,
tree_Checksums[(maximum_Tree_Depth - minimum_Tree_Depth + 2) / 2];
#pragma omp parallel
{
// Have one thread create the long_Lived_Tree of depth
// maximum_Tree_Depth in the memory_Pool which was already previously
// used for the stretch_Tree, compute the long_Lived_Tree_Checksum, and
// then just leave the long_Lived_Tree alone for a while while the rest
// of the binary trees finish processing (which should have
// simultaneously been started to be processed by any other available
// threads).
#pragma omp single nowait
{
long_Lived_Tree = create_Tree(maximum_Tree_Depth, memory_Pool);
long_Lived_Tree_Checksum = compute_Tree_Checksum(long_Lived_Tree);
}
// Create a thread_Memory_Pool for this thread to use.
apr_pool_t *thread_Memory_Pool;
apr_pool_create_unmanaged(&thread_Memory_Pool);
#pragma omp for nowait
for (intnative_t tree_Depth = minimum_Tree_Depth;
tree_Depth <= maximum_Tree_Depth; tree_Depth += 2) {
// Create a bunch of binary trees of depth tree_Depth, compute their
// checksums, and add the checksums to the total_Trees_Checksum.
intnative_t total_Trees_Checksum = 0;
for (intnative_t iterations =
1 << (maximum_Tree_Depth - tree_Depth + minimum_Tree_Depth);
iterations-- > 0;) {
apr_pool_clear(thread_Memory_Pool);
total_Trees_Checksum +=
compute_Tree_Checksum(create_Tree(tree_Depth, thread_Memory_Pool));
}
// Record the total_Trees_Checksum for the trees of depth
// tree_Depth.
tree_Checksums[(tree_Depth - minimum_Tree_Depth) / 2] =
total_Trees_Checksum;
}
apr_pool_destroy(thread_Memory_Pool);
}
// Print the statistics for all of the various tree depths.
for (intnative_t tree_Depth = minimum_Tree_Depth;
tree_Depth <= maximum_Tree_Depth; tree_Depth += 2)
printf(
"%jd\t trees of depth %jd\t check: %jd\n",
(intmax_t)1 << (maximum_Tree_Depth - tree_Depth + minimum_Tree_Depth),
(intmax_t)tree_Depth,
(intmax_t)tree_Checksums[(tree_Depth - minimum_Tree_Depth) / 2]);
// Print the statistics for the long_Lived_Tree that was processed earlier
// and then delete the memory_Pool that still is storing it up to this
// point. Note that although the long_Lived_Tree variable isn't used here,
// it still is in scope and valid to use until the call to
// apr_pool_destroy(memory_Pool) is made.
printf("long lived tree of depth %jd\t check: %jd\n",
(intmax_t)maximum_Tree_Depth, (intmax_t)long_Lived_Tree_Checksum);
apr_pool_destroy(memory_Pool);
apr_terminate();
return 0;
}
|
parallel_sections_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized
void foo();
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel sections'}}
#pragma omp parallel sections
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel sections'}}
#pragma omp parallel sections foo
void test_no_clause() {
  // Basic statement requirements: the directive needs a compound statement,
  // and any statement after the first must be inside '#pragma omp section'.
  int i;
#pragma omp parallel sections
  {
    foo();
  }

  // expected-error@+2 {{the statement for '#pragma omp parallel sections' must be a compound statement}}
#pragma omp parallel sections
  ++i;

#pragma omp parallel sections
  {
    foo();
    foo(); // expected-error {{statement in 'omp parallel sections' directive must be enclosed into a section region}}
  }
}
void test_branch_protected_scope() {
  // Branch protection: labels outside the region are invisible inside (and
  // vice versa), and returning from inside the OpenMP region is rejected.
  int i = 0;
L1:
  ++i;

  int x[24];

#pragma omp parallel sections
  {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
#pragma omp section
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L3;
    else if (i == 8) {
    L3:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;

  goto L3; // expected-error {{use of undeclared label 'L3'}}
}
void test_invalid_clause() {
  // Unknown tokens after the directive only warn; 'nowait' is not a valid
  // clause on '#pragma omp section' and errors.
  int i;
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel sections' are ignored}}
#pragma omp parallel sections foo bar
  {
    foo();
// expected-error@+1 {{unexpected OpenMP clause 'nowait' in directive '#pragma omp section'}}
#pragma omp section nowait
    ;
  }
}
void test_non_identifiers() {
  // Trailing semicolons/commas and clauses invalid on this directive produce
  // "extra tokens" warnings (plus an error for the 'linear' clause).
  int i, x;

// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel sections' are ignored}}
#pragma omp parallel sections;
  {
    foo();
  }

// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp parallel sections'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel sections' are ignored}}
#pragma omp parallel sections linear(x);
  {
    foo();
  }

// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel sections' are ignored}}
#pragma omp parallel sections private(x);
  {
    foo();
  }

// expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel sections' are ignored}}
#pragma omp parallel sections, private(x);
  {
    foo();
  }
}
void test_private() {
  // Malformed private() clauses (missing ')', empty list, types, literals)
  // are diagnosed; well-formed variable lists are accepted.
  int i;

// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel sections private(
  {
    foo();
  }

// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel sections private(,
  {
    foo();
  }

// expected-error@+1 2 {{expected expression}}
#pragma omp parallel sections private(, )
  {
    foo();
  }

// expected-error@+1 {{expected expression}}
#pragma omp parallel sections private()
  {
    foo();
  }

// expected-error@+1 {{expected expression}}
#pragma omp parallel sections private(int)
  {
    foo();
  }

// expected-error@+1 {{expected variable name}}
#pragma omp parallel sections private(0)
  {
    foo();
  }

  int x, y, z;

#pragma omp parallel sections private(x)
  {
    foo();
  }

#pragma omp parallel sections private(x, y)
  {
    foo();
  }

#pragma omp parallel sections private(x, y, z)
  {
    foo();
  }
}
void test_lastprivate() {
  // Same malformed-argument matrix as test_private, for the lastprivate()
  // clause.
  int i;

// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel sections lastprivate(
  {
    foo();
  }

// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel sections lastprivate(,
  {
    foo();
  }

// expected-error@+1 2 {{expected expression}}
#pragma omp parallel sections lastprivate(, )
  {
    foo();
  }

// expected-error@+1 {{expected expression}}
#pragma omp parallel sections lastprivate()
  {
    foo();
  }

// expected-error@+1 {{expected expression}}
#pragma omp parallel sections lastprivate(int)
  {
    foo();
  }

// expected-error@+1 {{expected variable name}}
#pragma omp parallel sections lastprivate(0)
  {
    foo();
  }

  int x, y, z;

#pragma omp parallel sections lastprivate(x)
  {
    foo();
  }

#pragma omp parallel sections lastprivate(x, y)
  {
    foo();
  }

#pragma omp parallel sections lastprivate(x, y, z)
  {
    foo();
  }
}
void test_firstprivate() {
  // Same malformed-argument matrix for firstprivate(); the valid cases also
  // combine firstprivate with lastprivate on the same variables.
  int i;

// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp parallel sections firstprivate(
  {
    foo();
  }

// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp parallel sections firstprivate(,
  {
    foo();
  }

// expected-error@+1 2 {{expected expression}}
#pragma omp parallel sections firstprivate(, )
  {
    foo();
  }

// expected-error@+1 {{expected expression}}
#pragma omp parallel sections firstprivate()
  {
    foo();
  }

// expected-error@+1 {{expected expression}}
#pragma omp parallel sections firstprivate(int)
  {
    foo();
  }

// expected-error@+1 {{expected variable name}}
#pragma omp parallel sections firstprivate(0)
  {
    foo();
  }

  int x, y, z;

#pragma omp parallel sections lastprivate(x) firstprivate(x)
  {
    foo();
  }

#pragma omp parallel sections lastprivate(x, y) firstprivate(x, y)
  {
    foo();
  }

#pragma omp parallel sections lastprivate(x, y, z) firstprivate(x, y, z)
  {
    foo();
  }
}
|
mandelbrot.c | /*
* This example is based on the code of Andrew V. Adinetz
* https://github.com/canonizer/mandelbrot-dyn
* Licensed under The MIT License
*/
#include <omp.h>
#include <complex.h>
#include <stdio.h>
#include <stdlib.h>
#include "pngwriter.h"
// Maximum number of iterations
const int MAX_ITER_COUNT = 512;
// Marker for different iteration counts
const int DIFF_ITER_COUNT = -1;
// Maximum recursion depth
const int MAX_DEPTH = 6;
// Region size below which do per-pixel
const int MIN_SIZE = 32;
// Subdivision factor along each axis
const int SUBDIV = 4;
// |z|^2 of a complex number z
// Squared magnitude |z|^2 of a complex number, avoiding the sqrt of cabs().
float abs2(complex v)
{
    // Keep the arithmetic in double (creal/cimag return double) and let the
    // return statement narrow the result to float, as before.
    const double re = creal(v);
    const double im = cimag(v);
    return re * re + im * im;
}
// The kernel to count per-pixel values of the portion of the Mandelbrot set
// Does not need to be edited
// Count Mandelbrot iterations for the pixel (x, y) of a w x h image mapped
// onto the complex window [cmin, cmax]. Does not need to be edited.
int kernel(int w, int h, complex cmin, complex cmax,
           int x, int y)
{
    const complex dc = cmax - cmin;
    const float fx = (float)x / w;
    const float fy = (float)y / h;
    const complex c = cmin + fx * creal(dc) + fy * cimag(dc) * I;

    // Iterate z -> z^2 + c until |z|^2 reaches 4 or the iteration cap.
    complex z = c;
    int n = 0;
    for (; n < MAX_ITER_COUNT && abs2(z) < 2 * 2; n++)
        z = z * z + c;

    return n;
}
/* Computes the Mandelbrot image recursively
* At each call, the image is divided into smaller blocks (by a factor of
* subdiv), and the function is called recursively with arguments corresponding
* to subblock. When maximum recursion depth is reached or size of block
* is smaller than predefined minimum, one starts to calculate actual pixel
* values
*
* - - - - - - - - ----- -----
* | | | | | |
* | | ----- -----
* | | --> --> ...
* | | ----- -----
* | | | | | |
* | | ----- -----
* ---------------
*/
void mandelbrot_block(int *iter_counts, int w, int h, complex cmin,
                      complex cmax, int x0, int y0, int d, int depth)
{
    // Each recursive subdivision is spawned as an OpenMP task. The sub-blocks
    // are disjoint, so the tasks write to non-overlapping regions of
    // iter_counts and need no explicit synchronization here; completion is
    // handled by the enclosing parallel region (see main()).
    int block_size = d / SUBDIV;
    if (depth + 1 < MAX_DEPTH && block_size > MIN_SIZE) {
        // Subdivide recursively
        for (int i = 0; i < SUBDIV; i++) {
            for (int j = 0; j < SUBDIV; j++) {
#pragma omp task
                mandelbrot_block(iter_counts, w, h, cmin, cmax,
                                 x0 + i * block_size, y0 + j * block_size,
                                 d / SUBDIV, depth + 1);
            }
        }
    } else {
        // Last recursion level reached, calculate the values per pixel
        for (int i = x0; i < x0 + d; i++) {
            for (int j = y0; j < y0 + d; j++) {
                iter_counts[j * w + i] = kernel(w, h, cmin, cmax, i, j);
            }
        }
    }
}
int main(int argc, char **argv)
{
// Picture size, should be power of two
const int w = 512;
const int h = w;
int *iter_counts;
complex cmin, cmax;
int pic_bytes = w * h * sizeof(int);
iter_counts = (int *)malloc(pic_bytes);
cmin = -1.5 + -1.0 * I;
cmax = 0.5 + 1.0 * I;
double t1 = omp_get_wtime();
// TODO create parallel region. How many threads should be calling
// mandelbrot_block in this uppermost level?
#pragma omp parallel
#pragma omp single
{
mandelbrot_block(iter_counts, w, h, cmin, cmax,
0, 0, w, 1);
}
double t2 = omp_get_wtime();
// Save the image to a PNG file
save_png(iter_counts, w, h, "mandelbrot.png");
double walltime = t2 - t1;
// Print the timings
printf("Mandelbrot set computed in %.3lf s, at %.3lf Mpix/s\n",
walltime, h * w * 1e-6 / walltime);
free(iter_counts);
return 0;
}
|
DenseVector.h | //=================================================================================================
/*!
// \file blaze/math/smp/openmp/DenseVector.h
// \brief Header file for the OpenMP-based dense vector SMP implementation
//
// Copyright (C) 2013 Klaus Iglberger - All Rights Reserved
//
// This file is part of the Blaze library. You can redistribute it and/or modify it under
// the terms of the New (Revised) BSD License. Redistribution and use in source and binary
// forms, with or without modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of
// conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list
// of conditions and the following disclaimer in the documentation and/or other materials
// provided with the distribution.
// 3. Neither the names of the Blaze development group nor the names of its contributors
// may be used to endorse or promote products derived from this software without specific
// prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
// OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
// SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
// DAMAGE.
*/
//=================================================================================================
#ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_
#define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_
//*************************************************************************************************
// Includes
//*************************************************************************************************
#include <omp.h>
#include <blaze/math/Aliases.h>
#include <blaze/math/constraints/SMPAssignable.h>
#include <blaze/math/expressions/DenseVector.h>
#include <blaze/math/expressions/SparseVector.h>
#include <blaze/math/Functions.h>
#include <blaze/math/simd/SIMDTrait.h>
#include <blaze/math/smp/ParallelSection.h>
#include <blaze/math/smp/SerialSection.h>
#include <blaze/math/traits/SubvectorExprTrait.h>
#include <blaze/math/typetraits/IsDenseVector.h>
#include <blaze/math/typetraits/IsSMPAssignable.h>
#include <blaze/math/views/Subvector.h>
#include <blaze/system/SMP.h>
#include <blaze/util/Assert.h>
#include <blaze/util/EnableIf.h>
#include <blaze/util/logging/FunctionTrace.h>
#include <blaze/util/mpl/And.h>
#include <blaze/util/mpl/Not.h>
#include <blaze/util/mpl/Or.h>
#include <blaze/util/StaticAssert.h>
#include <blaze/util/Types.h>
#include <blaze/util/typetraits/IsSame.h>
namespace blaze {
//=================================================================================================
//
// PLAIN ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a dense
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side dense vector
        , bool TF2 >    // Transpose flag of the right-hand side dense vector
void smpAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<VT1>                   ET1;
   typedef ElementType_<VT2>                   ET2;
   typedef SubvectorExprTrait_<VT1,aligned>    AlignedTarget;
   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size };

   // Vectorization is only attempted when both operand types are SIMD-enabled
   // and share the same element type.
   const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame<ET1,ET2>::value );
   const bool lhsAligned ( (~lhs).isAligned() );
   const bool rhsAligned ( (~rhs).isAligned() );

   const int    threads      ( omp_get_num_threads() );
   // 'addon' rounds the per-thread share up when the size is not evenly
   // divisible among the threads.
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare   ( (~lhs).size() / threads + addon );
   // Pad each chunk up to a multiple of SIMDSIZE so chunk boundaries stay on
   // SIMD lanes (the mask idiom assumes SIMDSIZE is a power of two).
   const size_t rest         ( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Rounding/padding may leave trailing threads without any elements.
      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );

      // Dispatch on operand alignment: aligned subvectors allow aligned SIMD
      // loads/stores, unaligned ones fall back to unaligned access.
      if( simdEnabled && lhsAligned && rhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         assign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && lhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         assign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && rhsAligned ) {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         assign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         assign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP assignment of a sparse vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP assignment of a sparse
// vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1 // Type of the left-hand side dense vector
, bool TF1 // Transpose flag of the left-hand side dense vector
, typename VT2 // Type of the right-hand side sparse vector
, bool TF2 > // Transpose flag of the right-hand side sparse vector
void smpAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs )
{
BLAZE_FUNCTION_TRACE;
BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );
typedef ElementType_<VT1> ET1;
typedef ElementType_<VT2> ET2;
typedef SubvectorExprTrait_<VT1,unaligned> UnalignedTarget;
const int threads ( omp_get_num_threads() );
const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
const size_t sizePerThread( (~lhs).size() / threads + addon );
#pragma omp for schedule(dynamic,1) nowait
for( int i=0UL; i<threads; ++i )
{
const size_t index( i*sizePerThread );
if( index >= (~lhs).size() )
continue;
const size_t size( min( sizePerThread, (~lhs).size() - index ) );
UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
assign( target, subvector<unaligned>( ~rhs, index, size ) );
}
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be assigned.
// \return void
//
// This function implements the default OpenMP-based SMP assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case at least one of the two operands is not SMP-assignable, in which case the
// assignment falls back to a plain serial assignment.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable (see the EnableIf condition),
   // so perform a plain serial assignment without spawning threads.
   assign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be assigned.
// \return void
//
// This function performs the OpenMP-based SMP assignment to a dense vector. Due to the
// explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   // The element types themselves must not be SMP-assignable, i.e. nested
   // parallel assignment of individual elements is not allowed.
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Fall back to a serial assignment if a serial section is active or the
      // right-hand side expression cannot be SMP-assigned.
      if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) {
         assign( ~lhs, ~rhs );
      }
      else {
         // Spawn the OpenMP team; each thread runs the backend, which splits
         // the work via its '#pragma omp for' loop.
         #pragma omp parallel shared( lhs, rhs )
         smpAssign_backend( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// ADDITION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be added.
// \return void
//
// This function is the backend implementation the OpenMP-based SMP addition assignment of a
// dense vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side dense vector
        , bool TF2 >    // Transpose flag of the right-hand side dense vector
void smpAddAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<VT1>                   ET1;
   typedef ElementType_<VT2>                   ET2;
   typedef SubvectorExprTrait_<VT1,aligned>    AlignedTarget;
   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size };

   // Vectorization is only attempted when both operand types are SIMD-enabled
   // and share the same element type.
   const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame<ET1,ET2>::value );
   const bool lhsAligned ( (~lhs).isAligned() );
   const bool rhsAligned ( (~rhs).isAligned() );

   const int    threads      ( omp_get_num_threads() );
   // 'addon' rounds the per-thread share up when the size is not evenly
   // divisible among the threads.
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare   ( (~lhs).size() / threads + addon );
   // Pad each chunk up to a multiple of SIMDSIZE so chunk boundaries stay on
   // SIMD lanes (the mask idiom assumes SIMDSIZE is a power of two).
   const size_t rest         ( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0UL; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Rounding/padding may leave trailing threads without any elements.
      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );

      // Dispatch on operand alignment, mirroring the plain-assignment backend
      // but performing an addition assignment on each chunk.
      if( simdEnabled && lhsAligned && rhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         addAssign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && lhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         addAssign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && rhsAligned ) {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         addAssign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         addAssign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP addition assignment of a sparse vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be added.
// \return void
//
// This function is the backend implementation the OpenMP-based SMP addition assignment of a
// sparse vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side sparse vector
        , bool TF2 >    // Transpose flag of the right-hand side sparse vector
void smpAddAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   // Partition the target vector into (roughly) equally sized chunks, one per thread.
   // A sparse right-hand side cannot be vectorized, so no SIMD-aware padding is applied.
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t sizePerThread( (~lhs).size() / threads + addon );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Due to rounding up, the chunks of trailing threads may lie beyond the vector end
      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );
      UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
      addAssign( target, subvector<unaligned>( ~rhs, index, size ) );
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be added.
// \return void
//
// This function implements the default OpenMP-based SMP addition assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable; delegate to the serial kernel.
   addAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be added.
// \return void
//
// This function implements the OpenMP-based SMP addition assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands are
// not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Spawn an OpenMP team only if no serial section is active and the right-hand
      // side permits SMP assignment; otherwise fall back to the serial kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
         #pragma omp parallel shared( lhs, rhs )
         smpAddAssign_backend( ~lhs, ~rhs );
      }
      else {
         addAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// SUBTRACTION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment of a
// dense vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side dense vector
        , bool TF2 >    // Transpose flag of the right-hand side dense vector
void smpSubAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<VT1>                   ET1;
   typedef ElementType_<VT2>                   ET2;
   typedef SubvectorExprTrait_<VT1,aligned>    AlignedTarget;
   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size };

   // SIMD can only be used if both operands support it and share the same element type
   const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame<ET1,ET2>::value );
   const bool lhsAligned ( (~lhs).isAligned() );
   const bool rhsAligned ( (~rhs).isAligned() );

   // Partition the target vector into (roughly) equally sized per-thread chunks; in
   // SIMD mode each chunk is padded to a multiple of SIMDSIZE (a power of two).
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare   ( (~lhs).size() / threads + addon );
   const size_t rest         ( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Due to rounding up, the chunks of trailing threads may lie beyond the vector end
      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );

      // Select the aligned/unaligned subvector kernels based on operand alignment
      if( simdEnabled && lhsAligned && rhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         subAssign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && lhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         subAssign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && rhsAligned ) {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         subAssign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         subAssign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP subtraction assignment of a sparse vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be subtracted.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP subtraction assignment of
// a sparse vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side sparse vector
        , bool TF2 >    // Transpose flag of the right-hand side sparse vector
void smpSubAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   // Partition the target vector into (roughly) equally sized chunks, one per thread.
   // A sparse right-hand side cannot be vectorized, so no SIMD-aware padding is applied.
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t sizePerThread( (~lhs).size() / threads + addon );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Due to rounding up, the chunks of trailing threads may lie beyond the vector end
      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );
      UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
      subAssign( target, subvector<unaligned>( ~rhs, index, size ) );
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be subtracted.
// \return void
//
// This function implements the default OpenMP-based SMP subtraction assignment of a vector to
// a dense vector. Due to the explicit application of the SFINAE principle, this function can
// only be selected by the compiler in case both operands are SMP-assignable and the element
// types of both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable; delegate to the serial kernel.
   subAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be subtracted.
// \return void
//
// This function implements the OpenMP-based SMP subtraction assignment to a dense vector. Due
// to the explicit application of the SFINAE principle, this function can only be selected by
// the compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Spawn an OpenMP team only if no serial section is active and the right-hand
      // side permits SMP assignment; otherwise fall back to the serial kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
         #pragma omp parallel shared( lhs, rhs )
         smpSubAssign_backend( ~lhs, ~rhs );
      }
      else {
         subAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// MULTIPLICATION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP multiplication assignment of a dense vector to a
// dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be multiplied.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP multiplication assignment
// of a dense vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side dense vector
        , bool TF2 >    // Transpose flag of the right-hand side dense vector
void smpMultAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<VT1>                   ET1;
   typedef ElementType_<VT2>                   ET2;
   typedef SubvectorExprTrait_<VT1,aligned>    AlignedTarget;
   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size };

   // SIMD can only be used if both operands support it and share the same element type
   const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame<ET1,ET2>::value );
   const bool lhsAligned ( (~lhs).isAligned() );
   const bool rhsAligned ( (~rhs).isAligned() );

   // Partition the target vector into (roughly) equally sized per-thread chunks; in
   // SIMD mode each chunk is padded to a multiple of SIMDSIZE (a power of two).
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare   ( (~lhs).size() / threads + addon );
   const size_t rest         ( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Due to rounding up, the chunks of trailing threads may lie beyond the vector end
      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );

      // Select the aligned/unaligned subvector kernels based on operand alignment
      if( simdEnabled && lhsAligned && rhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         multAssign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && lhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         multAssign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && rhsAligned ) {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         multAssign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         multAssign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP multiplication assignment of a sparse vector to a
// dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side sparse vector to be multiplied.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP multiplication assignment
// of a sparse vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side sparse vector
        , bool TF2 >    // Transpose flag of the right-hand side sparse vector
void smpMultAssign_backend( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   // Partition the target vector into (roughly) equally sized chunks, one per thread.
   // A sparse right-hand side cannot be vectorized, so no SIMD-aware padding is applied.
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t sizePerThread( (~lhs).size() / threads + addon );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Due to rounding up, the chunks of trailing threads may lie beyond the vector end
      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );
      UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
      multAssign( target, subvector<unaligned>( ~rhs, index, size ) );
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector to be multiplied.
// \return void
//
// This function implements the default OpenMP-based SMP multiplication assignment to a dense
// vector. Due to the explicit application of the SFINAE principle, this function can only be
// selected by the compiler in case both operands are SMP-assignable and the element types of
// both operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable; delegate to the serial kernel.
   multAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector to be multiplied.
// \return void
//
// This function implements the OpenMP-based SMP multiplication assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both
// operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Spawn an OpenMP team only if no serial section is active and the right-hand
      // side permits SMP assignment; otherwise fall back to the serial kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
         #pragma omp parallel shared( lhs, rhs )
         smpMultAssign_backend( ~lhs, ~rhs );
      }
      else {
         multAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// DIVISION ASSIGNMENT
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Backend of the OpenMP-based SMP division assignment of a dense vector to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector divisor.
// \return void
//
// This function is the backend implementation of the OpenMP-based SMP division assignment of
// a dense vector to a dense vector.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side dense vector
        , bool TF2 >    // Transpose flag of the right-hand side dense vector
void smpDivAssign_backend( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" );

   typedef ElementType_<VT1>                   ET1;
   typedef ElementType_<VT2>                   ET2;
   typedef SubvectorExprTrait_<VT1,aligned>    AlignedTarget;
   typedef SubvectorExprTrait_<VT1,unaligned>  UnalignedTarget;

   enum : size_t { SIMDSIZE = SIMDTrait< ElementType_<VT1> >::size };

   // SIMD can only be used if both operands support it and share the same element type
   const bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSame<ET1,ET2>::value );
   const bool lhsAligned ( (~lhs).isAligned() );
   const bool rhsAligned ( (~rhs).isAligned() );

   // Partition the target vector into (roughly) equally sized per-thread chunks; in
   // SIMD mode each chunk is padded to a multiple of SIMDSIZE (a power of two).
   const int    threads      ( omp_get_num_threads() );
   const size_t addon        ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL );
   const size_t equalShare   ( (~lhs).size() / threads + addon );
   const size_t rest         ( equalShare & ( SIMDSIZE - 1UL ) );
   const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) );

   #pragma omp for schedule(dynamic,1) nowait
   for( int i=0; i<threads; ++i )
   {
      const size_t index( i*sizePerThread );

      // Due to rounding up, the chunks of trailing threads may lie beyond the vector end
      if( index >= (~lhs).size() )
         continue;

      const size_t size( min( sizePerThread, (~lhs).size() - index ) );

      // Select the aligned/unaligned subvector kernels based on operand alignment
      if( simdEnabled && lhsAligned && rhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         divAssign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && lhsAligned ) {
         AlignedTarget target( subvector<aligned>( ~lhs, index, size ) );
         divAssign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
      else if( simdEnabled && rhsAligned ) {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         divAssign( target, subvector<aligned>( ~rhs, index, size ) );
      }
      else {
         UnalignedTarget target( subvector<unaligned>( ~lhs, index, size ) );
         divAssign( target, subvector<unaligned>( ~rhs, index, size ) );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Default implementation of the OpenMP-based SMP division assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side vector divisor.
// \return void
//
// This function implements the default OpenMP-based SMP division assignment to a dense vector.
// Due to the explicit application of the SFINAE principle, this function can only be selected
// by the compiler in case both operands are SMP-assignable and the element types of both
// operands are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>
                     , Or< Not< IsSMPAssignable<VT1> >
                         , Not< IsSMPAssignable<VT2> > > > >
   smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   // At least one operand is not SMP-assignable; delegate to the serial kernel.
   divAssign( ~lhs, ~rhs );
}
/*! \endcond */
//*************************************************************************************************
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
/*!\brief Implementation of the OpenMP-based SMP division assignment to a dense vector.
// \ingroup smp
//
// \param lhs The target left-hand side dense vector.
// \param rhs The right-hand side dense vector divisor.
// \return void
//
// This function implements the OpenMP-based SMP division assignment to a dense vector. Due to
// the explicit application of the SFINAE principle, this function can only be selected by the
// compiler in case both operands are SMP-assignable and the element types of both operands
// are not SMP-assignable.\n
// This function must \b NOT be called explicitly! It is used internally for the performance
// optimized evaluation of expression templates. Calling this function explicitly might result
// in erroneous results and/or in compilation errors. Instead of using this function use the
// assignment operator.
*/
template< typename VT1  // Type of the left-hand side dense vector
        , bool TF1      // Transpose flag of the left-hand side dense vector
        , typename VT2  // Type of the right-hand side vector
        , bool TF2 >    // Transpose flag of the right-hand side vector
inline EnableIf_< And< IsDenseVector<VT1>, IsSMPAssignable<VT1>, IsSMPAssignable<VT2> > >
   smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs )
{
   BLAZE_FUNCTION_TRACE;

   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT1> );
   BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_<VT2> );

   BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" );

   BLAZE_PARALLEL_SECTION
   {
      // Spawn an OpenMP team only if no serial section is active and the right-hand
      // side permits SMP assignment; otherwise fall back to the serial kernel.
      if( !isSerialSectionActive() && (~rhs).canSMPAssign() ) {
         #pragma omp parallel shared( lhs, rhs )
         smpDivAssign_backend( ~lhs, ~rhs );
      }
      else {
         divAssign( ~lhs, ~rhs );
      }
   }
}
/*! \endcond */
//*************************************************************************************************
//=================================================================================================
//
// COMPILE TIME CONSTRAINTS
//
//=================================================================================================
//*************************************************************************************************
/*! \cond BLAZE_INTERNAL */
namespace {

// Compile-time constraint: this translation unit must only be compiled when
// Blaze's OpenMP parallelization mode is enabled.
BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE );

}
/*! \endcond */
//*************************************************************************************************
} // namespace blaze
#endif
|
GrB_Vector_nvals.c | //------------------------------------------------------------------------------
// GrB_Vector_nvals: number of entries in a sparse vector
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#include "GB.h"
GrB_Info GrB_Vector_nvals           // get the number of entries in a vector
(
    GrB_Index *nvals,               // number of entries
    const GrB_Vector v              // vector to query
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GB_WHERE1 ("GrB_Vector_nvals (&nvals, v)") ;
    GB_BURBLE_START ("GrB_Vector_nvals") ;
    // return GrB_NULL_POINTER / GrB_INVALID_OBJECT for a NULL or faulty vector
    GB_RETURN_IF_NULL_OR_FAULTY (v) ;
    ASSERT (GB_VECTOR_OK (v)) ;

    //--------------------------------------------------------------------------
    // get the number of entries
    //--------------------------------------------------------------------------

    // a GrB_Vector is treated as a single-column GrB_Matrix, so the matrix
    // method is reused via the typecast
    GrB_Info info = GB_nvals (nvals, (GrB_Matrix) v, Context) ;
    GB_BURBLE_END ;
    // flush the OpenMP memory view before returning the result to the caller
    #pragma omp flush
    return (info) ;
}
|
dataset.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_DATASET_H_
#define LIGHTGBM_DATASET_H_
#include <LightGBM/config.h>
#include <LightGBM/feature_group.h>
#include <LightGBM/meta.h>
#include <LightGBM/utils/common.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <LightGBM/utils/random.h>
#include <LightGBM/utils/text_reader.h>
#include <string>
#include <functional>
#include <memory>
#include <mutex>
#include <unordered_set>
#include <utility>
#include <vector>
namespace LightGBM {
/*! \brief forward declaration */
class DatasetLoader;
/*!
 * \brief This class is used to store some meta (non-feature) data for training data,
 * e.g. labels, weights, initial scores, query level information.
 *
 * Some details:
 * 1. Label, used for training.
 * 2. Weights, weights of records, optional.
 * 3. Query Boundaries, necessary for lambdarank.
 *    The documents of the i-th query are in [query_boundaries[i], query_boundaries[i+1]).
 * 4. Query Weights, calculated automatically from weights and query_boundaries (if both exist);
 *    the weight for the i-th query is
 *    sum(weights[query_boundaries[i]], ..., weights[query_boundaries[i+1] - 1])
 *    / (query_boundaries[i+1] - query_boundaries[i]).
 * 5. Initial score, optional. If it exists, the model will boost from this score, otherwise it will start from 0.
 */
class Metadata {
 public:
  /*!
  * \brief Null constructor
  */
  Metadata();
  /*!
  * \brief Initialization will load query level information, since it is needed for sampling data
  * \param data_filename Filename of data
  * \param initscore_file Filename of initial scores
  */
  void Init(const char* data_filename, const char* initscore_file);
  /*!
  * \brief Init as a subset of another Metadata object
  * \param metadata Full metadata object to take the subset from
  * \param used_indices Indices of the used records
  * \param num_used_indices Number of used records
  */
  void Init(const Metadata& metadata, const data_size_t* used_indices, data_size_t num_used_indices);
  /*!
  * \brief Initialize from a block of binary memory
  * \param memory Pointer to memory
  */
  void LoadFromMemory(const void* memory);
  /*! \brief Destructor */
  ~Metadata();
  /*!
  * \brief Initial work, will allocate space for label, weight (if exists) and query (if exists)
  * \param num_data Number of training data
  * \param weight_idx Index of weight column, < 0 means it doesn't exist
  * \param query_idx Index of query id column, < 0 means it doesn't exist
  */
  void Init(data_size_t num_data, int weight_idx, int query_idx);
  /*!
  * \brief Partition label by used indices
  * \param used_indices Indices of locally used records
  */
  void PartitionLabel(const std::vector<data_size_t>& used_indices);
  /*!
  * \brief Partition meta data according to local used indices if needed
  * \param num_all_data Number of total training data, including other machines' data on parallel learning
  * \param used_data_indices Indices of local used training data
  */
  void CheckOrPartition(data_size_t num_all_data,
                        const std::vector<data_size_t>& used_data_indices);

  /*! \brief Set all labels; len is the number of provided values */
  void SetLabel(const label_t* label, data_size_t len);

  /*! \brief Set all record weights; len is the number of provided values */
  void SetWeights(const label_t* weights, data_size_t len);

  /*! \brief Set per-record query ids; len is the number of provided values */
  void SetQuery(const data_size_t* query, data_size_t len);

  /*!
  * \brief Set initial scores
  * \param init_score Initial scores, this class will manage memory for init_score.
  * \param len Number of initial scores
  */
  void SetInitScore(const double* init_score, data_size_t len);

  /*!
  * \brief Save binary data to file
  * \param writer File writer to write to
  */
  void SaveBinaryToFile(const VirtualFileWriter* writer) const;

  /*!
  * \brief Get size in bytes of this object
  */
  size_t SizesInByte() const;

  /*!
  * \brief Get pointer of label
  * \return Pointer of label
  */
  inline const label_t* label() const { return label_.data(); }

  /*!
  * \brief Set label for one record
  * \param idx Index of this record
  * \param value Label value of this record
  */
  inline void SetLabelAt(data_size_t idx, label_t value) {
    label_[idx] = value;
  }

  /*!
  * \brief Set weight for one record
  * \param idx Index of this record
  * \param value Weight value of this record
  */
  inline void SetWeightAt(data_size_t idx, label_t value) {
    weights_[idx] = value;
  }

  /*!
  * \brief Set query id for one record
  * \param idx Index of this record
  * \param value Query id value of this record
  */
  inline void SetQueryAt(data_size_t idx, data_size_t value) {
    // value already has type data_size_t; the previous static_cast was redundant
    queries_[idx] = value;
  }

  /*!
  * \brief Get weights; if they do not exist, returns nullptr
  * \return Pointer of weights
  */
  inline const label_t* weights() const {
    if (!weights_.empty()) {
      return weights_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get data boundaries on queries; if they do not exist, returns nullptr.
  *        We assume data is ordered by query:
  *        the interval [query_boundaries[i], query_boundaries[i+1])
  *        holds the data indices for query i.
  * \return Pointer of data boundaries on queries
  */
  inline const data_size_t* query_boundaries() const {
    if (!query_boundaries_.empty()) {
      return query_boundaries_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get number of queries
  * \return Number of queries
  */
  inline data_size_t num_queries() const { return num_queries_; }

  /*!
  * \brief Get weights for queries; if they do not exist, returns nullptr
  * \return Pointer of weights for queries
  */
  inline const label_t* query_weights() const {
    if (!query_weights_.empty()) {
      return query_weights_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get initial scores; if they do not exist, returns nullptr
  * \return Pointer of initial scores
  */
  inline const double* init_score() const {
    if (!init_score_.empty()) {
      return init_score_.data();
    } else {
      return nullptr;
    }
  }

  /*!
  * \brief Get size of initial scores
  */
  inline int64_t num_init_score() const { return num_init_score_; }

  /*! \brief Disable copy */
  Metadata& operator=(const Metadata&) = delete;
  /*! \brief Disable copy */
  Metadata(const Metadata&) = delete;

 private:
  /*! \brief Load initial scores from file */
  void LoadInitialScore(const char* initscore_file);
  /*! \brief Load weights from file */
  void LoadWeights();
  /*! \brief Load query boundaries from file */
  void LoadQueryBoundaries();
  /*! \brief Load (compute) per-query weights */
  void LoadQueryWeights();
  /*! \brief Filename of current data */
  std::string data_filename_;
  /*! \brief Number of data records */
  data_size_t num_data_;
  /*! \brief Number of weights, used to check correct weight file */
  data_size_t num_weights_;
  /*! \brief Label data */
  std::vector<label_t> label_;
  /*! \brief Weights data */
  std::vector<label_t> weights_;
  /*! \brief Query boundaries */
  std::vector<data_size_t> query_boundaries_;
  /*! \brief Query weights */
  std::vector<label_t> query_weights_;
  /*! \brief Number of queries */
  data_size_t num_queries_;
  /*! \brief Number of initial scores, used to check correct initial score file */
  int64_t num_init_score_;
  /*! \brief Initial score */
  std::vector<double> init_score_;
  /*! \brief Queries data */
  std::vector<data_size_t> queries_;
  /*! \brief Mutex for thread-safe calls */
  std::mutex mutex_;
  /*! \brief True if weights were loaded from a side file */
  bool weight_load_from_file_;
  /*! \brief True if query boundaries were loaded from a side file */
  bool query_load_from_file_;
  /*! \brief True if initial scores were loaded from a side file */
  bool init_score_load_from_file_;
};
/*! \brief Interface for Parser */
class Parser {
 public:
  /*! \brief virtual destructor */
  virtual ~Parser() {}
  /*!
  * \brief Parse one line with label
  * \param str One line record, string format, should end with '\0'
  * \param out_features Output columns, store in (column_idx, values)
  * \param out_label Label will store to this if exists
  */
  virtual void ParseOneLine(const char* str,
    std::vector<std::pair<int, double>>* out_features, double* out_label) const = 0;
  /*! \brief Number of feature columns this parser produces */
  virtual int NumFeatures() const = 0;
  /*!
  * \brief Create an object of parser, will auto choose the format depend on file
  * \param filename One Filename of data
  * \param header True if the file has a header line
  * \param num_features Pass num_features of this data file if you know, <=0 means don't know
  * \param label_idx index of label column
  * \return Object of parser
  */
  static Parser* CreateParser(const char* filename, bool header, int num_features, int label_idx);
};
/*! \brief The main class of data set,
 *         which is used for training or validation
 */
class Dataset {
 public:
  friend DatasetLoader;

  LIGHTGBM_EXPORT Dataset();

  LIGHTGBM_EXPORT Dataset(data_size_t num_data);

  /*! \brief Build the internal feature groups from per-feature bin mappers
  *          and the sampled non-zero values of each column */
  void Construct(
    std::vector<std::unique_ptr<BinMapper>>* bin_mappers,
    int num_total_features,
    const std::vector<std::vector<double>>& forced_bins,
    int** sample_non_zero_indices,
    const int* num_per_col,
    int num_sample_col,
    size_t total_sample_cnt,
    const Config& io_config);

  /*! \brief Destructor */
  LIGHTGBM_EXPORT ~Dataset();

  /*! \brief Check whether another dataset has the same feature layout
  *          (feature counts, label column and per-feature bin mappers) */
  LIGHTGBM_EXPORT bool CheckAlign(const Dataset& other) const {
    if (num_features_ != other.num_features_) {
      return false;
    }
    if (num_total_features_ != other.num_total_features_) {
      return false;
    }
    if (label_idx_ != other.label_idx_) {
      return false;
    }
    for (int i = 0; i < num_features_; ++i) {
      if (!FeatureBinMapper(i)->CheckAlign(*(other.FeatureBinMapper(i)))) {
        return false;
      }
    }
    return true;
  }

  /*! \brief Push one dense row; feature_values are indexed by total feature index.
  *          No-op once loading has finished. */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<double>& feature_values) {
    if (is_finish_load_) { return; }
    for (size_t i = 0; i < feature_values.size() && i < static_cast<size_t>(num_total_features_); ++i) {
      int feature_idx = used_feature_map_[i];
      if (feature_idx >= 0) {  // skip columns that are not mapped to a used feature
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, feature_values[i]);
      }
    }
  }

  /*! \brief Push one sparse row given as (column_idx, value) pairs.
  *          No-op once loading has finished. */
  inline void PushOneRow(int tid, data_size_t row_idx, const std::vector<std::pair<int, double>>& feature_values) {
    if (is_finish_load_) { return; }
    for (auto& inner_data : feature_values) {
      if (inner_data.first >= num_total_features_) { continue; }
      int feature_idx = used_feature_map_[inner_data.first];
      if (feature_idx >= 0) {
        const int group = feature2group_[feature_idx];
        const int sub_feature = feature2subfeature_[feature_idx];
        feature_groups_[group]->PushData(tid, sub_feature, row_idx, inner_data.second);
      }
    }
  }

  /*! \brief Push one value directly into a (group, sub_feature) slot */
  inline void PushOneData(int tid, data_size_t row_idx, int group, int sub_feature, double value) {
    feature_groups_[group]->PushData(tid, sub_feature, row_idx, value);
  }

  /*! \brief Map inner (used) feature index back to the original column index */
  inline int RealFeatureIndex(int fidx) const {
    return real_feature_idx_[fidx];
  }

  /*! \brief Map original column index to inner feature index (negative if unused) */
  inline int InnerFeatureIndex(int col_idx) const {
    return used_feature_map_[col_idx];
  }

  /*! \brief Group that holds the given inner feature */
  inline int Feature2Group(int feature_idx) const {
    return feature2group_[feature_idx];
  }

  /*! \brief Position of the given inner feature within its group.
  *          NOTE: name contains a typo ("Feture"); kept for API compatibility. */
  inline int Feture2SubFeature(int feature_idx) const {
    return feature2subfeature_[feature_idx];
  }

  /*! \brief First (cumulative) bin index of the given group */
  inline uint64_t GroupBinBoundary(int group_idx) const {
    return group_bin_boundaries_[group_idx];
  }

  /*! \brief Total number of bins over all feature groups */
  inline uint64_t NumTotalBin() const {
    return group_bin_boundaries_.back();
  }

  /*! \brief Original column indices of all used features */
  inline std::vector<int> ValidFeatureIndices() const {
    std::vector<int> ret;
    for (int i = 0; i < num_total_features_; ++i) {
      if (used_feature_map_[i] >= 0) {
        ret.push_back(i);
      }
    }
    return ret;
  }

  void ReSize(data_size_t num_data);

  void CopySubset(const Dataset* fullset, const data_size_t* used_indices, data_size_t num_used_indices, bool need_meta_data);

  LIGHTGBM_EXPORT void FinishLoad();

  // Typed field setters/getters; field_name selects the metadata field.
  LIGHTGBM_EXPORT bool SetFloatField(const char* field_name, const float* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetDoubleField(const char* field_name, const double* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool SetIntField(const char* field_name, const int* field_data, data_size_t num_element);

  LIGHTGBM_EXPORT bool GetFloatField(const char* field_name, data_size_t* out_len, const float** out_ptr);

  LIGHTGBM_EXPORT bool GetDoubleField(const char* field_name, data_size_t* out_len, const double** out_ptr);

  LIGHTGBM_EXPORT bool GetIntField(const char* field_name, data_size_t* out_len, const int** out_ptr);

  LIGHTGBM_EXPORT bool GetInt8Field(const char* field_name, data_size_t* out_len, const int8_t** out_ptr);

  /*!
  * \brief Save current dataset into binary file, will save to "filename.bin"
  */
  LIGHTGBM_EXPORT void SaveBinaryFile(const char* bin_filename);

  LIGHTGBM_EXPORT void DumpTextFile(const char* text_filename);

  LIGHTGBM_EXPORT void CopyFeatureMapperFrom(const Dataset* dataset);

  LIGHTGBM_EXPORT void CreateValid(const Dataset* dataset);

  /*! \brief Construct gradient/hessian histograms for the given leaf */
  void ConstructHistograms(const std::vector<int8_t>& is_feature_used,
                           const data_size_t* data_indices, data_size_t num_data,
                           int leaf_idx,
                           std::vector<std::unique_ptr<OrderedBin>>* ordered_bins,
                           const score_t* gradients, const score_t* hessians,
                           score_t* ordered_gradients, score_t* ordered_hessians,
                           bool is_constant_hessian,
                           HistogramBinEntry* histogram_data) const;

  void FixHistogram(int feature_idx, double sum_gradient, double sum_hessian, data_size_t num_data,
                    HistogramBinEntry* data) const;

  /*! \brief Partition data indices by threshold(s) on one feature;
  *          delegates to the feature's group */
  inline data_size_t Split(int feature,
                           const uint32_t* threshold, int num_threshold, bool default_left,
                           data_size_t* data_indices, data_size_t num_data,
                           data_size_t* lte_indices, data_size_t* gt_indices) const {
    const int group = feature2group_[feature];
    const int sub_feature = feature2subfeature_[feature];
    return feature_groups_[group]->Split(sub_feature, threshold, num_threshold, default_left, data_indices, num_data, lte_indices, gt_indices);
  }

  /*! \brief Bin offset of a feature within its group:
  *          1 for the first sub-feature, 0 otherwise */
  inline int SubFeatureBinOffset(int i) const {
    const int sub_feature = feature2subfeature_[i];
    if (sub_feature == 0) {
      return 1;
    } else {
      return 0;
    }
  }

  /*! \brief Number of bins of the given inner feature */
  inline int FeatureNumBin(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->num_bin();
  }

  /*! \brief Monotone constraint of a feature (0 when none configured) */
  inline int8_t FeatureMonotone(int i) const {
    if (monotone_types_.empty()) {
      return 0;
    } else {
      return monotone_types_[i];
    }
  }

  /*! \brief Penalty of a feature (1 when none configured).
  *          NOTE: name contains a typo ("Penalte"); kept for API compatibility. */
  inline double FeaturePenalte(int i) const {
    if (feature_penalty_.empty()) {
      return 1;
    } else {
      return feature_penalty_[i];
    }
  }

  /*! \brief True if any feature has a non-zero monotone constraint */
  bool HasMonotone() const {
    if (monotone_types_.empty()) {
      return false;
    } else {
      for (size_t i = 0; i < monotone_types_.size(); ++i) {
        if (monotone_types_[i] != 0) {
          return true;
        }
      }
      return false;
    }
  }

  /*! \brief Total number of bins in the given group */
  inline int FeatureGroupNumBin(int group) const {
    return feature_groups_[group]->num_total_bin_;
  }

  /*! \brief Bin mapper of the given inner feature */
  inline const BinMapper* FeatureBinMapper(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature].get();
  }

  /*! \brief Bin data of the group that contains inner feature i */
  inline const Bin* FeatureBin(int i) const {
    const int group = feature2group_[i];
    return feature_groups_[group]->bin_data_.get();
  }

  /*! \brief Bin data of the given group */
  inline const Bin* FeatureGroupBin(int group) const {
    return feature_groups_[group]->bin_data_.get();
  }

  inline bool FeatureGroupIsSparse(int group) const {
    return feature_groups_[group]->is_sparse_;
  }

  /*! \brief Iterator over the bins of one inner feature */
  inline BinIterator* FeatureIterator(int i) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->SubFeatureIterator(sub_feature);
  }

  /*! \brief Iterator over the bins of a whole feature group */
  inline BinIterator* FeatureGroupIterator(int group) const {
    return feature_groups_[group]->FeatureGroupIterator();
  }

  /*! \brief Convert a bin index of feature i back to a real threshold value */
  inline double RealThreshold(int i, uint32_t threshold) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->BinToValue(threshold);
  }

  // given a real threshold, find the closest threshold bin
  inline uint32_t BinThreshold(int i, double threshold_double) const {
    const int group = feature2group_[i];
    const int sub_feature = feature2subfeature_[i];
    return feature_groups_[group]->bin_mappers_[sub_feature]->ValueToBin(threshold_double);
  }

  /*! \brief Create one OrderedBin per feature group, in parallel; exceptions
  *          raised inside the loop are rethrown after it completes */
  inline void CreateOrderedBins(std::vector<std::unique_ptr<OrderedBin>>* ordered_bins) const {
    ordered_bins->resize(num_groups_);
    OMP_INIT_EX();
    #pragma omp parallel for schedule(guided)
    for (int i = 0; i < num_groups_; ++i) {
      OMP_LOOP_EX_BEGIN();
      ordered_bins->at(i).reset(feature_groups_[i]->bin_data_->CreateOrderedBin());
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();
  }

  /*!
  * \brief Get meta data pointer
  * \return Pointer of meta data
  */
  inline const Metadata& metadata() const { return metadata_; }

  /*! \brief Get Number of used features */
  inline int num_features() const { return num_features_; }

  /*! \brief Get Number of feature groups */
  inline int num_feature_groups() const { return num_groups_;}

  /*! \brief Get Number of total features */
  inline int num_total_features() const { return num_total_features_; }

  /*! \brief Get the index of label column */
  inline int label_idx() const { return label_idx_; }

  /*! \brief Get names of current data set */
  inline const std::vector<std::string>& feature_names() const { return feature_names_; }

  /*! \brief Validate and store feature names: rejects non-ASCII names,
  *          JSON-special characters and duplicates; replaces spaces with '_' */
  inline void set_feature_names(const std::vector<std::string>& feature_names) {
    if (feature_names.size() != static_cast<size_t>(num_total_features_)) {
      Log::Fatal("Size of feature_names error, should equal with total number of features");
    }
    feature_names_ = std::vector<std::string>(feature_names);
    std::unordered_set<std::string> feature_name_set;
    // replace ' ' in feature_names with '_'
    bool spaceInFeatureName = false;
    for (auto& feature_name : feature_names_) {
      // check ascii
      if (!Common::CheckASCII(feature_name)) {
        Log::Fatal("Do not support non-ASCII characters in feature name.");
      }
      // check json
      if (!Common::CheckAllowedJSON(feature_name)) {
        Log::Fatal("Do not support special JSON characters in feature name.");
      }
      if (feature_name.find(' ') != std::string::npos) {
        spaceInFeatureName = true;
        std::replace(feature_name.begin(), feature_name.end(), ' ', '_');
      }
      if (feature_name_set.count(feature_name) > 0) {
        Log::Fatal("Feature (%s) appears more than one time.", feature_name.c_str());
      }
      feature_name_set.insert(feature_name);
    }
    if (spaceInFeatureName) {
      Log::Warning("Find whitespaces in feature_names, replace with underlines");
    }
  }

  /*! \brief Per-column bin info strings ("none" for unused columns) */
  inline std::vector<std::string> feature_infos() const {
    std::vector<std::string> bufs;
    for (int i = 0; i < num_total_features_; i++) {
      int fidx = used_feature_map_[i];
      if (fidx == -1) {
        bufs.push_back("none");
      } else {
        const auto bin_mapper = FeatureBinMapper(fidx);
        bufs.push_back(bin_mapper->bin_info());
      }
    }
    return bufs;
  }

  void ResetConfig(const char* parameters);

  /*! \brief Get Number of data */
  inline data_size_t num_data() const { return num_data_; }

  /*! \brief Disable copy */
  Dataset& operator=(const Dataset&) = delete;
  /*! \brief Disable copy */
  Dataset(const Dataset&) = delete;

  /*! \brief Append the features of another dataset to this one */
  void addFeaturesFrom(Dataset* other);

 private:
  std::string data_filename_;
  /*! \brief Store used features */
  std::vector<std::unique_ptr<FeatureGroup>> feature_groups_;
  /*! \brief Mapper from real feature index to used index*/
  std::vector<int> used_feature_map_;
  /*! \brief Number of used features*/
  int num_features_;
  /*! \brief Number of total features*/
  int num_total_features_;
  /*! \brief Number of total data*/
  data_size_t num_data_;
  /*! \brief Store some label level data*/
  Metadata metadata_;
  /*! \brief index of label column */
  int label_idx_ = 0;
  /*! \brief Threshold for treating a feature as a sparse feature */
  double sparse_threshold_;
  /*! \brief store feature names */
  std::vector<std::string> feature_names_;
  /*! \brief Token string, presumably used to mark the binary dataset file format */
  static const char* binary_file_token;
  /*! \brief Number of feature groups */
  int num_groups_;
  /*! \brief Inner feature index -> original column index */
  std::vector<int> real_feature_idx_;
  /*! \brief Inner feature index -> group index */
  std::vector<int> feature2group_;
  /*! \brief Inner feature index -> position within its group */
  std::vector<int> feature2subfeature_;
  /*! \brief Cumulative bin counts per group */
  std::vector<uint64_t> group_bin_boundaries_;
  /*! \brief First inner feature index of each group */
  std::vector<int> group_feature_start_;
  /*! \brief Number of features in each group */
  std::vector<int> group_feature_cnt_;
  /*! \brief Per-feature monotone constraints (empty when unused) */
  std::vector<int8_t> monotone_types_;
  /*! \brief Per-feature penalties (empty when unused) */
  std::vector<double> feature_penalty_;
  /*! \brief True once FinishLoad has been called -- pushes become no-ops */
  bool is_finish_load_;
  int max_bin_;
  std::vector<int32_t> max_bin_by_feature_;
  /*! \brief User-forced bin boundaries per feature */
  std::vector<std::vector<double>> forced_bin_bounds_;
  int bin_construct_sample_cnt_;
  int min_data_in_bin_;
  bool use_missing_;
  bool zero_as_missing_;
};
} // namespace LightGBM
#endif   // LIGHTGBM_DATASET_H_
|
nanopore_hdp.c | //
// nanopore_hdp.c
//
//
// Created by Jordan Eizenga on 1/8/16.
//
//
// in 0-based index
#define ALIGNMENT_KMER_COL 9
#define ALIGNMENT_STRAND_COL 4
#define ALIGNMENT_SIGNAL_COL 13
#define NUM_ALIGNMENT_COLS 15
#define MODEL_ROW_HEADER_LENGTH 0
#define MODEL_MEAN_ENTRY 0
#define MODEL_NOISE_ENTRY 1
#define MODEL_ENTRY_LENGTH 5
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include "pairwiseAligner.h"
#include "hdp_math_utils.h"
// Wrap an existing HDP together with a sorted copy of the alphabet into a
// NanoporeHDP.  Aborts if the alphabet contains duplicate characters.
NanoporeHDP* package_nanopore_hdp(HierarchicalDirichletProcess* hdp, const char* alphabet, int64_t alphabet_size,
                                  int64_t kmer_length) {

    NanoporeHDP* nhdp = (NanoporeHDP*) malloc(sizeof(NanoporeHDP));

    // copy and sort alphabet (selection sort; alphabets are tiny)
    char* internal_alphabet = (char*) malloc(sizeof(char) * (alphabet_size + 1));
    for (int64_t i = 0; i < alphabet_size; i++) {
        internal_alphabet[i] = alphabet[i];
    }

    int64_t min_idx;
    char temp;
    for (int64_t i = 0; i < alphabet_size; i++) {
        min_idx = i;
        for (int64_t j = i + 1; j < alphabet_size; j++) {
            if (internal_alphabet[j] < internal_alphabet[min_idx]) {
                min_idx = j;
            }
        }
        temp = internal_alphabet[i];
        internal_alphabet[i] = internal_alphabet[min_idx];
        internal_alphabet[min_idx] = temp;
    }

    // After sorting, duplicates are adjacent -- so check the SORTED copy.
    // (The previous code checked the unsorted input, which missed
    // non-adjacent duplicates such as "ABA".)
    for (int64_t i = 1; i < alphabet_size; i++) {
        if (internal_alphabet[i - 1] == internal_alphabet[i]) {
            fprintf(stderr, "Characters of alphabet must be distinct.\n");
            exit(EXIT_FAILURE);
        }
    }

    internal_alphabet[alphabet_size] = '\0';

    nhdp->hdp = hdp;
    nhdp->alphabet = internal_alphabet;
    nhdp->alphabet_size = alphabet_size;
    nhdp->kmer_length = kmer_length;

    // note: destroying the HDP housed in the NHDP will destroy the DistributionMetricMemo
    nhdp->distr_metric_memos = stSet_construct2(&free);

    return nhdp;
}
// Free an NHDP and everything it owns: the underlying HDP, the set of
// distance memos, and the internal alphabet copy.
void destroy_nanopore_hdp(NanoporeHDP* nhdp) {
    destroy_hier_dir_proc(nhdp->hdp);
    stSet_destruct(nhdp->distr_metric_memos);
    free(nhdp->alphabet);
    free(nhdp);
}
// Getter: length of the k-mers this HDP models.
int64_t get_nanopore_hdp_kmer_length(NanoporeHDP* nhdp) {
    return nhdp->kmer_length;
}

// Getter: number of characters in the HDP's alphabet.
int64_t get_nanopore_hdp_alphabet_size(NanoporeHDP* nhdp) {
    return nhdp->alphabet_size;
}
// Return a NUL-terminated heap copy of the NHDP's (sorted) alphabet.
// Caller owns the returned buffer.
char* get_nanopore_hdp_alphabet(NanoporeHDP* nhdp) {
    int64_t size = nhdp->alphabet_size;
    char* copy = (char*) malloc(sizeof(char) * (size + 1));
    memcpy(copy, nhdp->alphabet, sizeof(char) * size);
    copy[size] = '\0';
    return copy;
}
// wrappers

// Run Gibbs sampling on the underlying HDP.
void execute_nhdp_gibbs_sampling(NanoporeHDP* nhdp, int64_t num_samples, int64_t burn_in,
                                 int64_t thinning, bool verbose) {
    execute_gibbs_sampling(nhdp->hdp, num_samples, burn_in, thinning, verbose);
}

// Run Gibbs sampling, invoking snapshot_func(hdp, snapshot_func_args) at
// snapshot points (scheduling handled by the underlying HDP implementation).
void execute_nhdp_gibbs_sampling_with_snapshots(NanoporeHDP* nhdp,
                                                int64_t num_samples, int64_t burn_in, int64_t thinning,
                                                void (*snapshot_func)(HierarchicalDirichletProcess*, void*),
                                                void* snapshot_func_args, bool verbose) {
    execute_gibbs_sampling_with_snapshots(nhdp->hdp, num_samples, burn_in, thinning, snapshot_func, snapshot_func_args,
                                          verbose);
}

// Finalize the underlying HDP's distributions after sampling.
void finalize_nhdp_distributions(NanoporeHDP* nhdp) {
    finalize_distributions(nhdp->hdp);
}
// Estimate normal-inverse-gamma prior parameters (mu, nu, alpha, beta) from
// the level means and noise stdevs of a minION pore model file.
void normal_inverse_gamma_params_from_minION(const char* model_filepath, double* mu_out, double* nu_out,
                                             double* alpha_out, double* beta_out) {
    // model format:
    // stateNumber \t alphabetSize \t alphabet \t kmerSize
    // [level_mean, level_stdv, noise_mean, noise_stdv, noise_lambda]
    FILE* model_file = fopen(model_filepath, "r");
    if (model_file == NULL) {
        // previously unchecked: a bad path crashed on the first read
        st_errAbort("normal_inverse_gamma_params_from_minION: could not open model file %s\n", model_filepath);
    }

    // header line: stateNumber \t alphabetSize \t alphabet \t kmerSize
    char* line = stFile_getLineFromFile(model_file);
    stList* tokens = stString_split(line);
    if (stList_length(tokens) != 4) {
        st_errAbort("normal_inverse_gamma_params_from_minION: Model format has changed invalid model"
                    "found here %s\n", model_filepath);
    }
    free(line);
    stList_destruct(tokens);

    // transitions line: validated but otherwise ignored
    line = stFile_getLineFromFile(model_file);
    tokens = stString_split(line);
    if (stList_length(tokens) != 10) {
        st_errnoAbort("More than 3-state hmm transitions parameters found\n");
    }
    free(line);               // was leaked before the next read
    stList_destruct(tokens);  // was leaked before the next read

    // emissions table: MODEL_ENTRY_LENGTH values per k-mer state
    line = stFile_getLineFromFile(model_file);
    tokens = stString_split(line);
    int64_t table_length = (stList_length(tokens) - MODEL_ROW_HEADER_LENGTH) / MODEL_ENTRY_LENGTH;
    double* means = (double*) malloc(sizeof(double) * table_length);
    double* precisions = (double*) malloc(sizeof(double) * table_length);

    int64_t mean_offset = MODEL_ROW_HEADER_LENGTH + MODEL_MEAN_ENTRY;
    int64_t noise_offset = MODEL_ROW_HEADER_LENGTH + MODEL_NOISE_ENTRY;
    char* mean_str;
    char* noise_str;
    double noise;
    for (int i = 0; i < table_length; i++) {
        mean_str = (char*) stList_get(tokens, mean_offset + i * MODEL_ENTRY_LENGTH);
        sscanf(mean_str, "%lf", &(means[i]));
        // precision = 1 / variance of the noise
        noise_str = (char*) stList_get(tokens, noise_offset + i * MODEL_ENTRY_LENGTH);
        sscanf(noise_str, "%lf", &noise);
        precisions[i] = 1.0 / (noise * noise);
    }
    free(line);
    stList_destruct(tokens);

    // fit the prior by maximum likelihood over the (mean, precision) pairs
    mle_normal_inverse_gamma_params(means, precisions, table_length, mu_out, nu_out, alpha_out, beta_out);

    free(means);
    free(precisions);
    fclose(model_file);
}
// fixed concentration parameters 'gamma' for each depth

// Build an HDP whose base normal-inverse-gamma parameters are estimated
// from a minION pore model file.
HierarchicalDirichletProcess* minION_hdp(int64_t num_dps, int64_t depth, double* gamma, double sampling_grid_start,
                                         double sampling_grid_stop, int64_t sampling_grid_length,
                                         const char* model_filepath) {
    double mu, nu, alpha, beta;
    normal_inverse_gamma_params_from_minION(model_filepath, &mu, &nu, &alpha, &beta);
    return new_hier_dir_proc(num_dps, depth, gamma, sampling_grid_start, sampling_grid_stop,
                             sampling_grid_length, mu, nu, alpha, beta);
}

// Gamma distribution prior on the concentration parameters 'gamma'
// must designate vector of 'alpha' and 'beta' parameters of distribution for each depth
HierarchicalDirichletProcess* minION_hdp_2(int64_t num_dps, int64_t depth, double* gamma_alpha,
                                           double* gamma_beta, double sampling_grid_start,
                                           double sampling_grid_stop, int64_t sampling_grid_length,
                                           const char* model_filepath) {
    double mu, nu, alpha, beta;
    normal_inverse_gamma_params_from_minION(model_filepath, &mu, &nu, &alpha, &beta);
    return new_hier_dir_proc_2(num_dps, depth, gamma_alpha, gamma_beta, sampling_grid_start,
                               sampling_grid_stop, sampling_grid_length, mu, nu, alpha, beta);
}
// Load alignment data into the HDP with no strand filtering.
void update_nhdp_from_alignment(NanoporeHDP* nhdp, const char* alignment_filepath, bool has_header) {
    update_nhdp_from_alignment_with_filter(nhdp, alignment_filepath, has_header, NULL);
}
// Read (k-mer, strand, signal) rows from an alignment table, keep only rows
// whose strand matches strand_filter (NULL keeps everything), and replace
// the HDP's data with the collected (signal, dp_id) pairs.
void update_nhdp_from_alignment_with_filter(NanoporeHDP* nhdp, const char* alignment_filepath,
                                            bool has_header, const char* strand_filter) {
    stList* signal_list = stList_construct3(0, &free);
    stList* dp_id_list = stList_construct3(0, &free);

    FILE* align_file = fopen(alignment_filepath, "r");
    if (align_file == NULL) {
        fprintf(stderr, "Alignment %s file does not exist.\n", alignment_filepath);
        exit(EXIT_FAILURE);
    }

    stList* tokens;
    int64_t line_length;
    char* kmer;
    char* strand;
    char* signal_str;
    int64_t* dp_id_ptr;
    double* signal_ptr;
    bool warned = false;
    int proceed = 0;  // stays 0 (keep every row) when strand_filter is NULL

    char* line = stFile_getLineFromFile(align_file);
    if (has_header) {
        free(line);  // discard the header line (it was previously leaked)
        line = stFile_getLineFromFile(align_file);
    }
    while (line != NULL) {
        tokens = stString_split(line);
        line_length = stList_length(tokens);
        // warn once if the column count differs from the expected layout
        if (!warned && line_length != NUM_ALIGNMENT_COLS) {
            fprintf(stderr, "Input format has changed from design period, HDP may receive incorrect data.\n");
            warned = true;
        }
        strand = (char*) stList_get(tokens, ALIGNMENT_STRAND_COL);
        if (strand_filter != NULL) {
            proceed = strcmp(strand, strand_filter);
        }
        if (proceed == 0) {
            signal_str = (char*) stList_get(tokens, ALIGNMENT_SIGNAL_COL);
            kmer = (char*) stList_get(tokens, ALIGNMENT_KMER_COL);
            // lists own these heap cells (constructed with &free)
            signal_ptr = (double*) malloc(sizeof(double));
            dp_id_ptr = (int64_t*) malloc(sizeof(int64_t));
            sscanf(signal_str, "%lf", signal_ptr);
            *dp_id_ptr = kmer_id(kmer, nhdp->alphabet, nhdp->alphabet_size, nhdp->kmer_length);
            stList_append(signal_list, signal_ptr);
            stList_append(dp_id_list, dp_id_ptr);
        }
        stList_destruct(tokens);
        free(line);
        line = stFile_getLineFromFile(align_file);
    }
    fclose(align_file);

    int64_t data_length;
    double* signal = stList_toDoublePtr(signal_list, &data_length);
    int64_t* dp_ids = stList_toIntPtr(dp_id_list, &data_length);
    stList_destruct(signal_list);
    stList_destruct(dp_id_list);

    reset_hdp_data(nhdp->hdp);
    pass_data_to_hdp(nhdp->hdp, signal, dp_ids, data_length);
}
// n^k by repeated multiplication (returns 1 for k <= 0).
int64_t power(int64_t n, int64_t k) {
    int64_t result = 1;
    int64_t remaining = k;
    while (remaining > 0) {
        result *= n;
        remaining--;
    }
    return result;
}
// ((n k)) -- the multiset coefficient, i.e. C(n + k - 1, k).
// The division order (k down to 2) matches the numerator product so each
// intermediate division is exact.
int64_t multiset_number(int64_t n, int64_t k) {
    int64_t result = 1;
    int64_t factor = n + k - 1;
    while (factor >= n) {       // numerator: (n+k-1)(n+k-2)...(n)
        result *= factor;
        factor--;
    }
    int64_t divisor = k;
    while (divisor >= 2) {      // denominator: k!
        result /= divisor;
        divisor--;
    }
    return result;
}
// Decode word_id into its base-alphabet_size digits, most significant
// first. Caller owns the returned array of word_length entries.
int64_t* get_word(int64_t word_id, int64_t alphabet_size, int64_t word_length) {
    int64_t* digits = (int64_t*) malloc(sizeof(int64_t) * word_length);
    int64_t remainder = word_id;
    for (int64_t pos = word_length - 1; pos >= 0; pos--) {
        digits[pos] = remainder % alphabet_size;
        remainder /= alphabet_size;
    }
    return digits;
}
// Decode a word id, then sort its digits ascending so that words that are
// permutations of each other map to the same multiset. Caller owns result.
int64_t* get_word_multiset(int64_t word_id, int64_t alphabet_size, int64_t word_length) {
    int64_t* multiset = get_word(word_id, alphabet_size, word_length);
    // selection sort -- word_length is tiny, so simplicity wins
    for (int64_t i = 0; i + 1 < word_length; i++) {
        int64_t smallest = i;
        for (int64_t j = i + 1; j < word_length; j++) {
            if (multiset[j] < multiset[smallest]) {
                smallest = j;
            }
        }
        if (smallest != i) {
            int64_t tmp = multiset[i];
            multiset[i] = multiset[smallest];
            multiset[smallest] = tmp;
        }
    }
    return multiset;
}
// Rank of a sorted (ascending) multiset among all multisets drawn from
// {alphabet_min, ..., alphabet_size - 1}. Recursive helper for multiset_id.
int64_t multiset_id_internal(int64_t* tail, int64_t tail_length, int64_t alphabet_min, int64_t alphabet_size) {
    int64_t head = tail[0];
    // base case: a single-symbol multiset is ranked by its offset
    if (tail_length == 1) {
        return head - alphabet_min;
    }
    int64_t step = 0;
    for (int64_t i = alphabet_min; i < alphabet_size; i++) {
        if (head > i) {
            // count all multisets whose first symbol is i (they rank earlier)
            step += multiset_number(alphabet_size - i, tail_length - 1);
        }
        else {
            // recurse on the remainder, with i as the new minimum symbol
            return step + multiset_id_internal(&(tail[1]), tail_length - 1, i, alphabet_size);
        }
    }
    fprintf(stderr, "Character outside alphabet included in multiset\n");
    exit(EXIT_FAILURE);
}
// Rank a sorted multiset over the alphabet {0, ..., alphabet_size - 1}.
int64_t multiset_id(int64_t* multiset, int64_t length, int64_t alphabet_size) {
    return multiset_id_internal(multiset, length, 0, alphabet_size);
}

// Map a word id to the id of its multiset (order-insensitive grouping of words).
int64_t word_id_to_multiset_id(int64_t word_id, int64_t alphabet_size, int64_t word_length) {
    int64_t* multiset = get_word_multiset(word_id, alphabet_size, word_length);
    int64_t id = multiset_id(multiset, word_length, alphabet_size);
    free(multiset);
    return id;
}
// Encode a digit word (most significant first) as a single integer id,
// interpreting the digits in base alphabet_size (Horner's rule).
int64_t word_id(int64_t* word, int64_t alphabet_size, int64_t word_length) {
    int64_t id = 0;
    for (int64_t i = 0; i < word_length; i++) {
        id = id * alphabet_size + word[i];
    }
    return id;
}
// Translate a k-mer string into an array of alphabet indices. Aborts if a
// character of the k-mer is not in the alphabet. Caller owns the result.
int64_t* kmer_to_word(char* kmer, char* alphabet, int64_t alphabet_size, int64_t kmer_length) {
    int64_t* word = (int64_t*) malloc(sizeof(int64_t) * kmer_length);
    for (int64_t i = 0; i < kmer_length; i++) {
        int64_t idx = -1;
        // linear scan of the (tiny) alphabet for this character
        for (int64_t j = 0; j < alphabet_size; j++) {
            if (kmer[i] == alphabet[j]) {
                idx = j;
                break;
            }
        }
        if (idx < 0) {
            fprintf(stderr, "[signalAlign] - ERROR: K-mer contains character outside alphabet. "
                            "Got offending kmer is: %s. alphabet is %s kmer length %"PRId64"\n",
                    kmer, alphabet, kmer_length);
            exit(EXIT_FAILURE);
        }
        word[i] = idx;
    }
    return word;
}
// Map a k-mer string to its integer id under the given alphabet.
int64_t kmer_id(char* kmer, char* alphabet, int64_t alphabet_size, int64_t kmer_length) {
    int64_t* word = kmer_to_word(kmer, alphabet, alphabet_size, kmer_length);
    int64_t id = word_id(word, alphabet_size, kmer_length);
    free(word);
    return id;
}

// Convenience wrapper for the canonical DNA alphabet "ACGT".
int64_t standard_kmer_id(char* kmer, int64_t kmer_length) {
    return kmer_id(kmer, "ACGT", 4, kmer_length);
}

// Wrapper using the NHDP's own alphabet and k-mer length.
int64_t nhdp_kmer_id(NanoporeHDP* nhdp, char* kmer) {
    return kmer_id(kmer, nhdp->alphabet, nhdp->alphabet_size, nhdp->kmer_length);
}
// Density of signal value x under the k-mer's dirichlet process
// distribution; returns LOG_ZERO when kmer is NULL.
double get_nanopore_kmer_density(NanoporeHDP* nhdp, void *kmer, void *x) {
    if (kmer == NULL) {
        return LOG_ZERO;
    } else {
        double u = *(double *)x;
        //return dir_proc_density(nhdp->hdp, *(double *) x, nhdp_kmer_id(nhdp, (char *)kmer));
        return dir_proc_density(nhdp->hdp, u, nhdp_kmer_id(nhdp, (char *)kmer));
    }
}

// Memoized distance between the distributions of two k-mers.
double get_kmer_distr_distance(NanoporeDistributionMetricMemo* memo, char* kmer_1, char* kmer_2) {
    NanoporeHDP* nhdp = memo->nhdp;
    return get_dir_proc_distance(memo->memo, nhdp_kmer_id(nhdp, kmer_1), nhdp_kmer_id(nhdp, kmer_2));
}
// Pair a DistributionMetricMemo with its owning NHDP. Per the note in
// package_nanopore_hdp, the memo's memory is freed with the NHDP's HDP.
NanoporeDistributionMetricMemo* package_nanopore_metric_memo(NanoporeHDP* nhdp, DistributionMetricMemo* memo) {
    NanoporeDistributionMetricMemo* nanopore_memo = (NanoporeDistributionMetricMemo*) malloc(sizeof(NanoporeDistributionMetricMemo));
    nanopore_memo->nhdp = nhdp;
    nanopore_memo->memo = memo;
    return nanopore_memo;
}

// Memoized pairwise-distance constructors, one per metric.
NanoporeDistributionMetricMemo* new_nhdp_kl_divergence_memo(NanoporeHDP* nhdp) {
    return package_nanopore_metric_memo(nhdp, new_kl_divergence_memo(nhdp->hdp));
}

NanoporeDistributionMetricMemo* new_nhdp_hellinger_distance_memo(NanoporeHDP* nhdp) {
    return package_nanopore_metric_memo(nhdp, new_hellinger_distance_memo(nhdp->hdp));
}

NanoporeDistributionMetricMemo* new_nhdp_l2_distance_memo(NanoporeHDP* nhdp) {
    return package_nanopore_metric_memo(nhdp, new_l2_distance_memo(nhdp->hdp));
}

NanoporeDistributionMetricMemo* new_nhdp_shannon_jensen_distance_memo(NanoporeHDP* nhdp) {
    return package_nanopore_metric_memo(nhdp, new_shannon_jensen_distance_memo(nhdp->hdp));
}
/* KL divergence between a k-mer distribution in one NanoporeHDP and a k-mer
 * distribution in another (possibly the same) NanoporeHDP. */
double compare_nhdp_distrs_kl_divergence(NanoporeHDP* nhdp_1, char* kmer_1,
                                         NanoporeHDP* nhdp_2, char* kmer_2) {
    int64_t id_1 = nhdp_kmer_id(nhdp_1, kmer_1);
    int64_t id_2 = nhdp_kmer_id(nhdp_2, kmer_2);
    return compare_hdp_distrs_kl_divergence(nhdp_1->hdp, id_1, nhdp_2->hdp, id_2);
}
/* L2 distance between k-mer distributions drawn from two NanoporeHDPs. */
double compare_nhdp_distrs_l2_distance(NanoporeHDP* nhdp_1, char* kmer_1,
                                       NanoporeHDP* nhdp_2, char* kmer_2) {
    int64_t id_1 = nhdp_kmer_id(nhdp_1, kmer_1);
    int64_t id_2 = nhdp_kmer_id(nhdp_2, kmer_2);
    return compare_hdp_distrs_l2_distance(nhdp_1->hdp, id_1, nhdp_2->hdp, id_2);
}
/* Shannon-Jensen distance between k-mer distributions from two NanoporeHDPs. */
double compare_nhdp_distrs_shannon_jensen_distance(NanoporeHDP* nhdp_1, char* kmer_1,
                                                   NanoporeHDP* nhdp_2, char* kmer_2) {
    int64_t id_1 = nhdp_kmer_id(nhdp_1, kmer_1);
    int64_t id_2 = nhdp_kmer_id(nhdp_2, kmer_2);
    return compare_hdp_distrs_shannon_jensen_distance(nhdp_1->hdp, id_1, nhdp_2->hdp, id_2);
}
/* Hellinger distance between k-mer distributions from two NanoporeHDPs. */
double compare_nhdp_distrs_hellinger_distance(NanoporeHDP* nhdp_1, char* kmer_1,
                                              NanoporeHDP* nhdp_2, char* kmer_2) {
    int64_t id_1 = nhdp_kmer_id(nhdp_1, kmer_1);
    int64_t id_2 = nhdp_kmer_id(nhdp_2, kmer_2);
    return compare_hdp_distrs_hellinger_distance(nhdp_1->hdp, id_1, nhdp_2->hdp, id_2);
}
/* Expected value of the distribution associated with `kmer`. */
double kmer_distr_expected_val(NanoporeHDP* nhdp, char* kmer) {
    int64_t dp_id = nhdp_kmer_id(nhdp, kmer);
    return dir_proc_expected_val(nhdp->hdp, dp_id);
}
/* Variance of the distribution associated with `kmer`. */
double kmer_distr_variance(NanoporeHDP* nhdp, char* kmer) {
    int64_t dp_id = nhdp_kmer_id(nhdp, kmer);
    return dir_proc_variance(nhdp->hdp, dp_id);
}
/* Flat topology: one leaf DP per k-mer plus a single shared base DP. */
int64_t flat_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) {
    return power(alphabet_size, kmer_length) + 1;
}
/* Wire the flat topology: every leaf k-mer DP (ids 0..num_leaves-1) hangs
 * directly off the base DP, whose id equals the leaf count. */
void flat_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) {
    int64_t base_dp_id = power(alphabet_size, kmer_length);
    for (int64_t leaf_id = 0; leaf_id < base_dp_id; leaf_id++) {
        set_dir_proc_parent(hdp, leaf_id, base_dp_id);
    }
}
/* Build a flat (two-level) NanoporeHDP with fixed concentration parameters.
 * The gamma array is handed to minION_hdp and not freed here — presumably
 * ownership transfers to the HDP (TODO confirm against minION_hdp). */
NanoporeHDP* flat_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                            double base_gamma, double leaf_gamma,
                            double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length,
                            const char* model_filepath) {
    double* gammas = (double*) malloc(sizeof(double) * 2);
    gammas[0] = base_gamma;
    gammas[1] = leaf_gamma;
    HierarchicalDirichletProcess* hdp = minION_hdp(flat_hdp_num_dps(alphabet_size, kmer_length), 2, gammas,
                                                   sampling_grid_start, sampling_grid_stop, sampling_grid_length,
                                                   model_filepath);
    flat_hdp_model_internal(hdp, alphabet_size, kmer_length);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
/* Build a flat (two-level) NanoporeHDP whose concentration parameters are
 * themselves given gamma priors (alpha/beta pairs for base and leaf levels). */
NanoporeHDP* flat_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                              double base_gamma_alpha, double base_gamma_beta,
                              double leaf_gamma_alpha, double leaf_gamma_beta,
                              double sampling_grid_start, double sampling_grid_stop,
                              int64_t sampling_grid_length, const char* model_filepath) {
    double* alphas = (double*) malloc(sizeof(double) * 2);
    double* betas = (double*) malloc(sizeof(double) * 2);
    alphas[0] = base_gamma_alpha;
    betas[0] = base_gamma_beta;
    alphas[1] = leaf_gamma_alpha;
    betas[1] = leaf_gamma_beta;
    HierarchicalDirichletProcess* hdp = minION_hdp_2(flat_hdp_num_dps(alphabet_size, kmer_length), 2,
                                                     alphas, betas, sampling_grid_start,
                                                     sampling_grid_stop, sampling_grid_length, model_filepath);
    flat_hdp_model_internal(hdp, alphabet_size, kmer_length);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
/* Multiset topology: one leaf per k-mer, one middle DP per character
 * multiset, and one base DP. */
int64_t multiset_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) {
    return power(alphabet_size, kmer_length) + multiset_number(alphabet_size, kmer_length) + 1;
}
/* Wire the multiset topology: each leaf k-mer DP hangs off the DP of its
 * character multiset; each multiset DP hangs off the single base DP. */
void multiset_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) {
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t base_dp_id = num_leaves + multiset_number(alphabet_size, kmer_length);
    // leaves -> multiset DPs (ids offset by the leaf count)
    for (int64_t leaf = 0; leaf < num_leaves; leaf++) {
        int64_t parent_multiset = word_id_to_multiset_id(leaf, alphabet_size, kmer_length);
        set_dir_proc_parent(hdp, leaf, num_leaves + parent_multiset);
    }
    // multiset DPs -> base DP
    for (int64_t dp = num_leaves; dp < base_dp_id; dp++) {
        set_dir_proc_parent(hdp, dp, base_dp_id);
    }
}
/* Build a three-level multiset NanoporeHDP with fixed concentration
 * parameters (base, middle = multiset layer, leaf). */
NanoporeHDP* multiset_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                                double base_gamma, double middle_gamma, double leaf_gamma,
                                double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length,
                                const char* model_filepath) {
    double* gammas = (double*) malloc(sizeof(double) * 3);
    gammas[0] = base_gamma;
    gammas[1] = middle_gamma;
    gammas[2] = leaf_gamma;
    HierarchicalDirichletProcess* hdp = minION_hdp(multiset_hdp_num_dps(alphabet_size, kmer_length), 3, gammas,
                                                   sampling_grid_start, sampling_grid_stop, sampling_grid_length,
                                                   model_filepath);
    multiset_hdp_model_internal(hdp, alphabet_size, kmer_length);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
/* Build a three-level multiset NanoporeHDP with gamma priors on all three
 * concentration parameters (alpha/beta pairs per level). */
NanoporeHDP* multiset_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                                  double base_gamma_alpha, double base_gamma_beta,
                                  double middle_gamma_alpha, double middle_gamma_beta,
                                  double leaf_gamma_alpha, double leaf_gamma_beta,
                                  double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length,
                                  const char* model_filepath) {
    double* alphas = (double*) malloc(sizeof(double) * 3);
    double* betas = (double*) malloc(sizeof(double) * 3);
    alphas[0] = base_gamma_alpha;
    betas[0] = base_gamma_beta;
    alphas[1] = middle_gamma_alpha;
    betas[1] = middle_gamma_beta;
    alphas[2] = leaf_gamma_alpha;
    betas[2] = leaf_gamma_beta;
    HierarchicalDirichletProcess* hdp = minION_hdp_2(multiset_hdp_num_dps(alphabet_size, kmer_length), 3,
                                                     alphas, betas, sampling_grid_start,
                                                     sampling_grid_stop, sampling_grid_length, model_filepath);
    multiset_hdp_model_internal(hdp, alphabet_size, kmer_length);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
/* Middle-2-nucleotides topology: one leaf per k-mer, one middle DP per
 * ordered pair of central characters, one base DP. Aborts for k <= 2, where
 * "middle two" is undefined. */
int64_t middle_2_nts_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) {
    if (kmer_length <= 2) {
        fprintf(stderr, "k-mer is not long enough for middle 2 nucleotides HDP\n");
        exit(EXIT_FAILURE);
    }
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t num_middle = power(alphabet_size, 2);
    return num_leaves + num_middle + 1;
}
/* Id of the ordered pair of central characters of a k-mer (for odd k the
 * pair straddling position k/2 is used). */
int64_t kmer_id_to_middle_nts_id(int64_t kmer_id, int64_t alphabet_size, int64_t kmer_length) {
    int64_t* letters = get_word(kmer_id, alphabet_size, kmer_length);
    int64_t mid = kmer_length / 2;
    int64_t pair_id = alphabet_size * letters[mid - 1] + letters[mid];
    free(letters);
    return pair_id;
}
/* Wire the middle-2-nucleotides topology: leaves hang off the DP for their
 * central character pair; pair DPs hang off the single base DP. */
void middle_2_nts_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) {
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t base_dp_id = num_leaves + power(alphabet_size, 2);
    for (int64_t leaf = 0; leaf < num_leaves; leaf++) {
        int64_t pair_dp = kmer_id_to_middle_nts_id(leaf, alphabet_size, kmer_length);
        set_dir_proc_parent(hdp, leaf, pair_dp + num_leaves);
    }
    for (int64_t dp = num_leaves; dp < base_dp_id; dp++) {
        set_dir_proc_parent(hdp, dp, base_dp_id);
    }
}
/* Build a three-level NanoporeHDP whose middle layer groups k-mers by their
 * two central nucleotides (fixed concentration parameters). Warns when k is
 * odd, since the "middle two" positions are then chosen arbitrarily. */
NanoporeHDP* middle_2_nts_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                                    double base_gamma, double middle_gamma, double leaf_gamma,
                                    double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length,
                                    const char* model_filepath) {
    if (kmer_length % 2 != 0) {
        fprintf(stderr, "Warning: middle two nucleotides of odd length kmer is ambiguous. Resolving arbitrarily.\n");
    }
    double* gammas = (double*) malloc(sizeof(double) * 3);
    gammas[0] = base_gamma;
    gammas[1] = middle_gamma;
    gammas[2] = leaf_gamma;
    HierarchicalDirichletProcess* hdp = minION_hdp(middle_2_nts_hdp_num_dps(alphabet_size, kmer_length), 3, gammas,
                                                   sampling_grid_start, sampling_grid_stop, sampling_grid_length,
                                                   model_filepath);
    middle_2_nts_hdp_model_internal(hdp, alphabet_size, kmer_length);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
/* Multiset id of a word after mapping each character to its group label.
 * The label word is sorted ascending (selection sort) so all words with the
 * same group multiset share one canonical representative. */
int64_t word_id_to_group_multiset_id(int64_t word_id, int64_t* char_groups, int64_t alphabet_size,
                                     int64_t word_length, int64_t num_groups) {
    int64_t* labels = get_word(word_id, alphabet_size, word_length);
    // replace each character index with its group label
    for (int64_t i = 0; i < word_length; i++) {
        labels[i] = char_groups[labels[i]];
    }
    // selection sort, ascending
    for (int64_t i = 0; i < word_length; i++) {
        int64_t smallest = i;
        for (int64_t j = i + 1; j < word_length; j++) {
            if (labels[j] < labels[smallest]) {
                smallest = j;
            }
        }
        int64_t held = labels[i];
        labels[i] = labels[smallest];
        labels[smallest] = held;
    }
    int64_t id = multiset_id(labels, word_length, num_groups);
    free(labels);
    return id;
}
/* DP count for the group-multiset topology: leaves + one middle DP per group
 * multiset + one base DP. The group count is (max label) + 1, which assumes
 * labels are consecutive from 0 (see confirm_valid_groupings). */
int64_t group_multiset_hdp_num_dps(int64_t alphabet_size, int64_t* char_groups, int64_t kmer_length) {
    int64_t num_groups = 0;
    for (int64_t i = 0; i < alphabet_size; i++) {
        int64_t candidate = char_groups[i] + 1;
        if (candidate > num_groups) {
            num_groups = candidate;
        }
    }
    return power(alphabet_size, kmer_length) + multiset_number(num_groups, kmer_length) + 1;
}
/* Wire the group-multiset topology: each leaf k-mer DP hangs off the DP of
 * its group multiset; each group-multiset DP hangs off the base DP. */
void group_multiset_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t* char_groups,
                                       int64_t alphabet_size, int64_t kmer_length) {
    // group count = (max label) + 1, labels assumed consecutive from 0
    int64_t num_groups = 0;
    for (int64_t i = 0; i < alphabet_size; i++) {
        int64_t candidate = char_groups[i] + 1;
        if (candidate > num_groups) {
            num_groups = candidate;
        }
    }
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t base_dp_id = num_leaves + multiset_number(num_groups, kmer_length);
    // leaves -> group-multiset DPs (ids offset by the leaf count)
    for (int64_t leaf = 0; leaf < num_leaves; leaf++) {
        int64_t parent_multiset = word_id_to_group_multiset_id(leaf, char_groups, alphabet_size,
                                                               kmer_length, num_groups);
        set_dir_proc_parent(hdp, leaf, num_leaves + parent_multiset);
    }
    // group-multiset DPs -> base DP
    for (int64_t dp = num_leaves; dp < base_dp_id; dp++) {
        set_dir_proc_parent(hdp, dp, base_dp_id);
    }
}
/* Abort the process unless every group label is non-negative and the labels
 * are consecutively numbered starting at 0 (every value in [0, max] occurs). */
void confirm_valid_groupings(int64_t* char_groups, int64_t alphabet_size) {
    int64_t num_groups = 0;
    for (int64_t i = 0; i < alphabet_size; i++) {
        if (char_groups[i] < 0) {
            fprintf(stderr, "Group numbers must be non-negative.\n");
            exit(EXIT_FAILURE);
        }
        if (char_groups[i] + 1 > num_groups) {
            num_groups = char_groups[i] + 1;
        }
    }
    // every label in [0, num_groups) must appear at least once
    for (int64_t group = 0; group < num_groups; group++) {
        bool seen = false;
        for (int64_t i = 0; i < alphabet_size; i++) {
            if (char_groups[i] == group) {
                seen = true;
                break;
            }
        }
        if (!seen) {
            fprintf(stderr, "Groups must be consecutively numbered starting with 0.\n");
            exit(EXIT_FAILURE);
        }
    }
}
/* Return a freshly allocated copy of char_groups reordered so the labels
 * follow the alphabet's characters sorted ascending. Inputs are untouched;
 * the caller frees the returned array. */
int64_t* alphabet_sort_groups(const char* alphabet, int64_t* char_groups, int64_t alphabet_size) {
    char* chars = (char*) malloc(sizeof(char) * alphabet_size);
    int64_t* groups = (int64_t*) malloc(sizeof(int64_t) * alphabet_size);
    for (int64_t i = 0; i < alphabet_size; i++) {
        chars[i] = alphabet[i];
        groups[i] = char_groups[i];
    }
    // selection sort on the characters, mirroring every swap in the labels
    for (int64_t i = 0; i < alphabet_size; i++) {
        int64_t smallest = i;
        for (int64_t j = i + 1; j < alphabet_size; j++) {
            if (chars[j] < chars[smallest]) {
                smallest = j;
            }
        }
        char held_char = chars[i];
        chars[i] = chars[smallest];
        chars[smallest] = held_char;
        int64_t held_group = groups[i];
        groups[i] = groups[smallest];
        groups[smallest] = held_group;
    }
    free(chars);
    return groups;
}
/* Build a three-level NanoporeHDP whose middle layer groups k-mers by the
 * multiset of user-defined character groups (fixed concentration parameters).
 * Assumes char_groups are 0-based and consecutively numbered; aborts via
 * confirm_valid_groupings otherwise. */
NanoporeHDP* group_multiset_hdp_model(const char* alphabet, int64_t* char_groups, int64_t alphabet_size, int64_t kmer_length,
                                      double base_gamma, double middle_gamma, double leaf_gamma,
                                      double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length,
                                      const char* model_filepath) {
    confirm_valid_groupings(char_groups, alphabet_size);
    double* gammas = (double*) malloc(sizeof(double) * 3);
    gammas[0] = base_gamma;
    gammas[1] = middle_gamma;
    gammas[2] = leaf_gamma;
    HierarchicalDirichletProcess* hdp = minION_hdp(group_multiset_hdp_num_dps(alphabet_size, char_groups, kmer_length),
                                                   3, gammas, sampling_grid_start, sampling_grid_stop,
                                                   sampling_grid_length, model_filepath);
    // reorder labels to ascending-character order before wiring the topology
    int64_t* sorted_groups = alphabet_sort_groups(alphabet, char_groups, alphabet_size);
    group_multiset_hdp_model_internal(hdp, sorted_groups, alphabet_size, kmer_length);
    free(sorted_groups);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
/* Build a three-level group-multiset NanoporeHDP with gamma priors on all
 * three concentration parameters. Assumes char_groups are 0-based and
 * consecutively numbered; aborts via confirm_valid_groupings otherwise. */
NanoporeHDP* group_multiset_hdp_model_2(const char* alphabet, int64_t* char_groups, int64_t alphabet_size, int64_t kmer_length,
                                        double base_gamma_alpha, double base_gamma_beta, double middle_gamma_alpha,
                                        double middle_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta,
                                        double sampling_grid_start, double sampling_grid_stop, int64_t sampling_grid_length,
                                        const char* model_filepath) {
    confirm_valid_groupings(char_groups, alphabet_size);
    double* alphas = (double*) malloc(sizeof(double) * 3);
    double* betas = (double*) malloc(sizeof(double) * 3);
    alphas[0] = base_gamma_alpha;
    betas[0] = base_gamma_beta;
    alphas[1] = middle_gamma_alpha;
    betas[1] = middle_gamma_beta;
    alphas[2] = leaf_gamma_alpha;
    betas[2] = leaf_gamma_beta;
    HierarchicalDirichletProcess* hdp = minION_hdp_2(group_multiset_hdp_num_dps(alphabet_size, char_groups, kmer_length),
                                                     3, alphas, betas, sampling_grid_start,
                                                     sampling_grid_stop, sampling_grid_length, model_filepath);
    // reorder labels to ascending-character order before wiring the topology
    int64_t* sorted_groups = alphabet_sort_groups(alphabet, char_groups, alphabet_size);
    group_multiset_hdp_model_internal(hdp, sorted_groups, alphabet_size, kmer_length);
    free(sorted_groups);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
/* Build a three-level middle-2-nucleotides NanoporeHDP with gamma priors on
 * all three concentration parameters. Warns for odd k, where the central
 * pair is chosen arbitrarily. */
NanoporeHDP* middle_2_nts_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                                      double base_gamma_alpha, double base_gamma_beta, double middle_gamma_alpha,
                                      double middle_gamma_beta, double leaf_gamma_alpha, double leaf_gamma_beta,
                                      double sampling_grid_start, double sampling_grid_stop,
                                      int64_t sampling_grid_length, const char* model_filepath) {
    if (kmer_length % 2 != 0) {
        fprintf(stderr, "Warning: middle 2 nucleotides of odd length kmer is ambiguous. Resolving arbitrarily.\n");
    }
    double* alphas = (double*) malloc(sizeof(double) * 3);
    double* betas = (double*) malloc(sizeof(double) * 3);
    alphas[0] = base_gamma_alpha;
    betas[0] = base_gamma_beta;
    alphas[1] = middle_gamma_alpha;
    betas[1] = middle_gamma_beta;
    alphas[2] = leaf_gamma_alpha;
    betas[2] = leaf_gamma_beta;
    HierarchicalDirichletProcess* hdp = minION_hdp_2(middle_2_nts_hdp_num_dps(alphabet_size, kmer_length), 3,
                                                     alphas, betas, sampling_grid_start,
                                                     sampling_grid_stop, sampling_grid_length, model_filepath);
    middle_2_nts_hdp_model_internal(hdp, alphabet_size, kmer_length);
    finalize_hdp_structure(hdp);
    return package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
}
/* Purine-composition topology: one leaf per k-mer, one middle DP per
 * possible purine count (0..k, hence k+1 of them), and one base DP. */
int64_t purine_composition_hdp_num_dps(int64_t num_purines, int64_t num_pyrimidines, int64_t kmer_length) {
    int64_t alphabet_size = num_purines + num_pyrimidines;
    return power(alphabet_size, kmer_length) + (kmer_length + 1) + 1;
}
/* Wire the purine-composition topology: each leaf k-mer DP hangs off the
 * middle DP for its purine count; each middle DP hangs off the base DP.
 * purine_alphabet[c] flags whether character index c is a purine and must be
 * initialized for all alphabet_size entries. */
void purine_composition_hdp_model_internal(HierarchicalDirichletProcess* hdp, bool* purine_alphabet,
                                           int64_t alphabet_size, int64_t kmer_length) {
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t base_dp_id = num_leaves + kmer_length + 1;
    for (int64_t leaf = 0; leaf < num_leaves; leaf++) {
        int64_t* letters = get_word(leaf, alphabet_size, kmer_length);
        int64_t purine_count = 0;
        for (int64_t i = 0; i < kmer_length; i++) {
            if (purine_alphabet[letters[i]]) {
                purine_count++;
            }
        }
        free(letters);
        set_dir_proc_parent(hdp, leaf, num_leaves + purine_count);
    }
    for (int64_t dp = num_leaves; dp < base_dp_id; dp++) {
        set_dir_proc_parent(hdp, dp, base_dp_id);
    }
}
/* Build a three-level NanoporeHDP whose middle layer groups k-mers by how
 * many purines they contain (fixed concentration parameters). The full
 * alphabet is the purine characters followed by the pyrimidine characters;
 * after packaging, the alphabet is re-fetched in the HDP's internal ordering
 * so the purine flags line up with the internal character indices.
 *
 * BUG FIX: the purine-flag array holds alphabet_size entries, but the
 * original initialization loop ran only to num_purines, leaving every
 * pyrimidine entry uninitialized — undefined behavior when read by
 * purine_composition_hdp_model_internal. The sibling
 * purine_composition_hdp_model_2 correctly iterates to alphabet_size; this
 * loop now does the same. */
NanoporeHDP* purine_composition_hdp_model(char* purine_alphabet, int64_t num_purines,
                                          char* pyrimidine_alphabet, int64_t num_pyrimidines,
                                          int64_t kmer_length, double base_gamma, double middle_gamma,
                                          double leaf_gamma, double sampling_grid_start, double sampling_grid_stop,
                                          int64_t sampling_grid_length, const char* model_filepath) {
    double* gamma_params = (double*) malloc(sizeof(double) * 3);
    gamma_params[0] = base_gamma;
    gamma_params[1] = middle_gamma;
    gamma_params[2] = leaf_gamma;
    int64_t num_dps = purine_composition_hdp_num_dps(num_purines, num_pyrimidines, kmer_length);
    HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start,
                                                   sampling_grid_stop, sampling_grid_length,
                                                   model_filepath);
    // full alphabet = purines then pyrimidines
    int64_t alphabet_size = num_purines + num_pyrimidines;
    char* alphabet = (char*) malloc(sizeof(char) * alphabet_size);
    for (int64_t i = 0; i < num_purines; i++) {
        alphabet[i] = purine_alphabet[i];
    }
    for (int64_t i = 0; i < num_pyrimidines; i++) {
        alphabet[i + num_purines] = pyrimidine_alphabet[i];
    }
    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
    // get back the alphabet in the internal ordering
    free(alphabet);
    alphabet = get_nanopore_hdp_alphabet(nhdp);
    bool* purines = (bool*) malloc(sizeof(bool) * alphabet_size);
    // initialize the flag for EVERY character (was: i < num_purines)
    for (int64_t i = 0; i < alphabet_size; i++) {
        purines[i] = false;
        for (int64_t j = 0; j < num_purines; j++) {
            if (alphabet[i] == purine_alphabet[j]) {
                purines[i] = true;
                break;
            }
        }
    }
    free(alphabet);
    purine_composition_hdp_model_internal(hdp, purines, alphabet_size, kmer_length);
    free(purines);
    finalize_hdp_structure(hdp);
    return nhdp;
}
/* Build a three-level purine-composition NanoporeHDP with gamma priors on
 * all three concentration parameters. The full alphabet is purines followed
 * by pyrimidines; after packaging, the alphabet is re-fetched in the HDP's
 * internal ordering so the purine flags match internal character indices. */
NanoporeHDP* purine_composition_hdp_model_2(char* purine_alphabet, int64_t num_purines,
                                            char* pyrimidine_alphabet, int64_t num_pyrimidines,
                                            int64_t kmer_length, double base_gamma_alpha, double base_gamma_beta,
                                            double middle_gamma_alpha, double middle_gamma_beta,
                                            double leaf_gamma_alpha, double leaf_gamma_beta, double sampling_grid_start,
                                            double sampling_grid_stop, int64_t sampling_grid_length,
                                            const char* model_filepath) {
    double* alphas = (double*) malloc(sizeof(double) * 3);
    double* betas = (double*) malloc(sizeof(double) * 3);
    alphas[0] = base_gamma_alpha;
    betas[0] = base_gamma_beta;
    alphas[1] = middle_gamma_alpha;
    betas[1] = middle_gamma_beta;
    alphas[2] = leaf_gamma_alpha;
    betas[2] = leaf_gamma_beta;
    HierarchicalDirichletProcess* hdp = minION_hdp_2(purine_composition_hdp_num_dps(num_purines, num_pyrimidines,
                                                                                   kmer_length),
                                                     3, alphas, betas, sampling_grid_start,
                                                     sampling_grid_stop, sampling_grid_length, model_filepath);
    // full alphabet = purines then pyrimidines
    int64_t alphabet_size = num_purines + num_pyrimidines;
    char* alphabet = (char*) malloc(sizeof(char) * alphabet_size);
    for (int64_t i = 0; i < num_purines; i++) {
        alphabet[i] = purine_alphabet[i];
    }
    for (int64_t i = 0; i < num_pyrimidines; i++) {
        alphabet[i + num_purines] = pyrimidine_alphabet[i];
    }
    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
    // get back the alphabet in the internal ordering
    free(alphabet);
    alphabet = get_nanopore_hdp_alphabet(nhdp);
    bool* is_purine = (bool*) malloc(sizeof(bool) * alphabet_size);
    for (int64_t i = 0; i < alphabet_size; i++) {
        is_purine[i] = false;
        for (int64_t j = 0; j < num_purines; j++) {
            if (alphabet[i] == purine_alphabet[j]) {
                is_purine[i] = true;
                break;
            }
        }
    }
    free(alphabet);
    purine_composition_hdp_model_internal(hdp, is_purine, alphabet_size, kmer_length);
    free(is_purine);
    finalize_hdp_structure(hdp);
    return nhdp;
}
/* Write a NanoporeHDP to filepath: alphabet size, alphabet string, and k-mer
 * length on their own lines, followed by the serialized HDP.
 * NOTE(review): fopen failure is not checked — fprintf on NULL would crash;
 * confirm callers guarantee a writable path. */
void serialize_nhdp(NanoporeHDP* nhdp, const char* filepath) {
    FILE* fh = fopen(filepath, "w");
    fprintf(fh, "%"PRId64"\n", nhdp->alphabet_size);
    fprintf(fh, "%s\n", nhdp->alphabet);
    fprintf(fh, "%"PRId64"\n", nhdp->kmer_length);
    serialize_hdp(nhdp->hdp, fh);
    fclose(fh);
}
/* Read a NanoporeHDP previously written by serialize_nhdp: alphabet size,
 * alphabet string, k-mer length (one per line), then the serialized HDP.
 * package_nanopore_hdp copies what it needs, so the local alphabet buffer is
 * freed before returning.
 *
 * BUG FIX: sscanf's %s writes the scanned characters PLUS a terminating NUL,
 * so the alphabet buffer must hold alphabet_size + 1 bytes; the original
 * allocation was one byte short, overflowing the heap buffer. */
NanoporeHDP* deserialize_nhdp(const char* filepath) {
    FILE* in = fopen(filepath, "r");
    char* line = stFile_getLineFromFile(in);
    int64_t alphabet_size;
    sscanf(line, "%"SCNd64, &alphabet_size);
    free(line);
    line = stFile_getLineFromFile(in);
    // +1 for the NUL terminator appended by %s
    char* alphabet = (char*) malloc(sizeof(char) * (alphabet_size + 1));
    sscanf(line, "%s", alphabet);
    free(line);
    line = stFile_getLineFromFile(in);
    int64_t kmer_length;
    sscanf(line, "%"SCNd64, &kmer_length);
    free(line);
    HierarchicalDirichletProcess* hdp = deserialize_hdp(in);
    fclose(in);
    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
    free(alphabet);
    return nhdp;
}
/* Abort unless all six alpha/beta hyperparameters for a three-level prior
 * (base, middle, leaf) were supplied (i.e. none is NULL_HYPERPARAMETER).
 * Fix: error message read "provide a alphas and betas" — grammar corrected. */
static void nanoporeHdp_checkThreeLevelPriorParameters(double baseGammaAlpha, double baseGammaBeta,
                                                       double middleGammaAlpha, double middleGammaBeta,
                                                       double leafGammaAlpha, double leafGammaBeta) {
    if ((baseGammaAlpha == NULL_HYPERPARAMETER) || (baseGammaBeta == NULL_HYPERPARAMETER) ||
        (middleGammaAlpha == NULL_HYPERPARAMETER) || (middleGammaBeta == NULL_HYPERPARAMETER) ||
        (leafGammaAlpha == NULL_HYPERPARAMETER) || (leafGammaBeta == NULL_HYPERPARAMETER)) {
        st_errAbort("loadNanoporeHdpFromScratch: You need to provide alphas and betas for the base, middle, "
                    "and the leaf distributions for the prior for this NanoporeHdp");
    }
}
/* Abort unless all three fixed gammas (base, middle, leaf) were supplied
 * (i.e. none is NULL_HYPERPARAMETER). */
static void nanoporeHdp_checkThreeLevelFixedParameters(double baseGamma, double middleGamma, double leafGamma) {
    bool missing = (baseGamma == NULL_HYPERPARAMETER) ||
                   (middleGamma == NULL_HYPERPARAMETER) ||
                   (leafGamma == NULL_HYPERPARAMETER);
    if (missing) {
        st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma, middle gamma, and leaf gamma "
                    "for this NanoporeHdpType\n");
    }
}
/* Abort unless all four alpha/beta hyperparameters for a two-level prior
 * (base, leaf) were supplied (i.e. none is NULL_HYPERPARAMETER).
 * Fix: the concatenated message literals were missing a space ("the
 * leafdistributions") and read "provide a alphas" — both corrected. */
static void nanoporeHdp_checkTwoLevelPriorParameters(double baseGammaAlpha, double baseGammaBeta,
                                                     double leafGammaAlpha, double leafGammaBeta) {
    if ((baseGammaAlpha == NULL_HYPERPARAMETER) || (baseGammaBeta == NULL_HYPERPARAMETER) ||
        (leafGammaAlpha == NULL_HYPERPARAMETER) || (leafGammaBeta == NULL_HYPERPARAMETER)) {
        st_errAbort("loadNanoporeHdpFromScratch: You need to provide alphas and betas for the base and the leaf "
                    "distributions for the prior for this NanoporeHdp");
    }
}
/* Construct a fresh NanoporeHDP of the topology requested by nHdpType.
 *
 * Fixed-gamma types read baseGamma/middleGamma/leafGamma; prior types read
 * the corresponding alpha/beta pairs. Unused hyperparameters may be passed as
 * NULL_HYPERPARAMETER; each branch validates the ones it needs and aborts
 * (via st_errAbort) if any is missing. The alphabet and its size are chosen
 * per type (epigenetic-C, methyl-C, methyl-C/A, or purine/pyrimidine sets —
 * the *_ALPHA / SYMBOL_NUMBER_* constants are defined elsewhere). modelFile
 * and the sampling-grid arguments are forwarded to the model constructors.
 * Exits the process on an unrecognized nHdpType. */
static NanoporeHDP *loadNanoporeHdpFromScratch(NanoporeHdpType nHdpType, const char *modelFile, int64_t kmerLength,
                                               double baseGamma, double middleGamma, double leafGamma,
                                               double baseGammaAlpha, double baseGammaBeta,
                                               double middleGammaAlpha, double middleGammaBeta,
                                               double leafGammaAlpha, double leafGammaBeta,
                                               double samplingGridStart, double samplingGridEnd,
                                               int64_t samplingGridLength) {
    // Flat (two-level) topology, fixed gammas, epigenetic-C alphabet.
    if (nHdpType == singleLevelFixed) {
        if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER)) {
            st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma and leaf gamma "
                        "for this NanoporeHdpType\n");
        }
        NanoporeHDP *nHdp = flat_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                           baseGamma, leafGamma,
                                           samplingGridStart, samplingGridEnd, samplingGridLength, modelFile);
        return nHdp;
    }
    // Flat topology with gamma priors, epigenetic-C alphabet.
    if (nHdpType == singleLevelPrior) {
        nanoporeHdp_checkTwoLevelPriorParameters(baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = flat_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                             baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta,
                                             samplingGridStart, samplingGridEnd, samplingGridLength,
                                             modelFile);
        return nHdp;
    }
    // Flat topology with gamma priors, methyl-C alphabet.
    if (nHdpType == singleLevelPrior2) {
        nanoporeHdp_checkTwoLevelPriorParameters(baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = flat_hdp_model_2(METHYL_CYTOSINE_ALPHA, SYMBOL_NUMBER, kmerLength,
                                             baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta,
                                             samplingGridStart, samplingGridEnd, samplingGridLength,
                                             modelFile);
        return nHdp;
    }
    // Flat topology with gamma priors, methyl-C/A (E. coli) alphabet.
    if (nHdpType == singleLevelPriorEcoli) {
        nanoporeHdp_checkTwoLevelPriorParameters(baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = flat_hdp_model_2(METHYL_CYTOSINE_ADENOSINE_ALPHA, SYMBOL_NUMBER_METHYL_CA, kmerLength,
                                             baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta,
                                             samplingGridStart, samplingGridEnd, samplingGridLength,
                                             modelFile);
        return nHdp;
    }
    // Multiset middle layer, fixed gammas, epigenetic-C alphabet.
    if (nHdpType == multisetFixed) {
        nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma);
        NanoporeHDP *nHdp = multiset_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                               baseGamma, middleGamma, leafGamma,
                                               samplingGridStart, samplingGridEnd, samplingGridLength,
                                               modelFile);
        return nHdp;
    }
    // Multiset middle layer with gamma priors, epigenetic-C alphabet.
    if (nHdpType == multisetPrior) {
        nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta,
                                                   middleGammaAlpha, middleGammaBeta,
                                                   leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = multiset_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                                 baseGammaAlpha, baseGammaBeta,
                                                 middleGammaAlpha, middleGammaBeta,
                                                 leafGammaAlpha, leafGammaBeta,
                                                 samplingGridStart, samplingGridEnd, samplingGridLength,
                                                 modelFile);
        return nHdp;
    }
    // Multiset middle layer with gamma priors, methyl-C alphabet.
    if (nHdpType == multisetPrior2) {
        nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta,
                                                   middleGammaAlpha, middleGammaBeta,
                                                   leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = multiset_hdp_model_2(METHYL_CYTOSINE_ALPHA, SYMBOL_NUMBER, kmerLength,
                                                 baseGammaAlpha, baseGammaBeta,
                                                 middleGammaAlpha, middleGammaBeta,
                                                 leafGammaAlpha, leafGammaBeta,
                                                 samplingGridStart, samplingGridEnd, samplingGridLength,
                                                 modelFile);
        return nHdp;
    }
    // Multiset middle layer with gamma priors, methyl-C/A (E. coli) alphabet.
    if (nHdpType == multisetPriorEcoli) {
        nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta,
                                                   middleGammaAlpha, middleGammaBeta,
                                                   leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = multiset_hdp_model_2(METHYL_CYTOSINE_ADENOSINE_ALPHA, SYMBOL_NUMBER_METHYL_CA, kmerLength,
                                                 baseGammaAlpha, baseGammaBeta,
                                                 middleGammaAlpha, middleGammaBeta,
                                                 leafGammaAlpha, leafGammaBeta,
                                                 samplingGridStart, samplingGridEnd, samplingGridLength,
                                                 modelFile);
        return nHdp;
    }
    // Purine-composition middle layer, fixed gammas (2 purines, 4 pyrimidines).
    if (nHdpType == compFixed) {
        nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma);
        NanoporeHDP *nHdp = purine_composition_hdp_model(PURINES, 2, PYRIMIDINES, 4, kmerLength,
                                                         baseGamma, middleGamma, leafGamma,
                                                         samplingGridStart, samplingGridEnd,
                                                         samplingGridLength, modelFile);
        return nHdp;
    }
    // Purine-composition middle layer with gamma priors.
    if (nHdpType == compPrior) {
        nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha,
                                                   middleGammaBeta, leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = purine_composition_hdp_model_2(PURINES, 2, PYRIMIDINES, 4, kmerLength,
                                                           baseGammaAlpha, baseGammaBeta,
                                                           middleGammaAlpha, middleGammaBeta,
                                                           leafGammaAlpha, leafGammaBeta,
                                                           samplingGridStart, samplingGridEnd,
                                                           samplingGridLength, modelFile);
        return nHdp;
    }
    // Middle-two-nucleotides middle layer, fixed gammas.
    if (nHdpType == middleNtsFixed) {
        nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma);
        NanoporeHDP *nHdp = middle_2_nts_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                                   baseGamma, middleGamma, leafGamma,
                                                   samplingGridStart, samplingGridEnd, samplingGridLength,
                                                   modelFile);
        return nHdp;
    }
    // Middle-two-nucleotides middle layer with gamma priors.
    if (nHdpType == middleNtsPrior) {
        nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha,
                                                   middleGammaBeta, leafGammaAlpha, leafGammaBeta);
        NanoporeHDP *nHdp = middle_2_nts_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                                     baseGammaAlpha, baseGammaBeta,
                                                     middleGammaAlpha, middleGammaBeta,
                                                     leafGammaAlpha, leafGammaBeta,
                                                     samplingGridStart, samplingGridEnd, samplingGridLength,
                                                     modelFile);
        return nHdp;
    }
    // Grouped-character multiset middle layer, fixed gammas.
    if (nHdpType == groupMultisetFixed) {
        nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma);
        // ACEGOT
        // {0, 1, 1, 2, 1, 3}
        int64_t groups[6] = {0, 1, 1, 2, 1, 3};
        NanoporeHDP *nHdp = group_multiset_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, groups, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                                     baseGamma, middleGamma, leafGamma,
                                                     samplingGridStart, samplingGridEnd, samplingGridLength,
                                                     modelFile);
        return nHdp;
    }
    // Grouped-character multiset middle layer with gamma priors.
    if (nHdpType == groupMultisetPrior) {
        nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha,
                                                   middleGammaBeta, leafGammaAlpha, leafGammaBeta);
        // ACEGOT
        // {0, 1, 1, 2, 1, 3}
        int64_t groups[6] = {0, 1, 1, 2, 1, 3};
        NanoporeHDP *nHdp = group_multiset_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, groups,
                                                       SYMBOL_NUMBER_EPIGENETIC_C, kmerLength,
                                                       baseGammaAlpha, baseGammaBeta,
                                                       middleGammaAlpha, middleGammaBeta,
                                                       leafGammaAlpha, leafGammaBeta,
                                                       samplingGridStart, samplingGridEnd, samplingGridLength,
                                                       modelFile);
        return nHdp;
    }
    // No type matched: fail loudly (else binds to the last if above).
    else {
        fprintf(stderr, "loadNanoporeHdpFromScratch: - error making HDP from scratch\n");
        exit(EXIT_FAILURE);
    }
}
/* Build template and complement NanoporeHDPs from an alignment file, run
 * Gibbs sampling on each, finalize their distributions, and serialize them
 * to templateHDP / complementHDP respectively.
 *
 * The two strands are processed concurrently as OpenMP sections: the first
 * structured block carries no explicit `#pragma omp section` (the directive
 * for the first block is optional per the OpenMP spec), the second is marked
 * explicitly. Each section builds its own HDP from scratch, filters the
 * alignment to its strand ("t" = template, "c" = complement), samples
 * nbSamples with the given burnIn/thinning, then writes and destroys it.
 *
 * NOTE(review): the "Running Gibbs..." format strings have no space between
 * the PRId64 fields and the following words ("%"PRId64"samples"), so the log
 * reads e.g. "100samples, 10burn in" — cosmetic only; confirm before fixing.
 */
void nanoporeHdp_buildNanoporeHdpFromAlignment(NanoporeHdpType type, int64_t kmerLength,
                                               const char *templateModelFile, const char* complementModelFile,
                                               const char *alignments,
                                               const char *templateHDP, const char *complementHDP,
                                               int64_t nbSamples, int64_t burnIn, int64_t thinning, bool verbose,
                                               double baseGamma, double middleGamma, double leafGamma,
                                               double baseGammaAlpha, double baseGammaBeta,
                                               double middleGammaAlpha, double middleGammaBeta,
                                               double leafGammaAlpha, double leafGammaBeta,
                                               double samplingGridStart, double samplingGridEnd,
                                               int64_t samplingGridLength) {
    fprintf(stderr, "Building Nanopore HDP\n");
    #pragma omp parallel sections
    {
        // Section 1 (implicit): template strand.
        {
            fprintf(stderr, "Updating Template HDP from alignments...\n");
            NanoporeHDP *nHdpT = loadNanoporeHdpFromScratch(type, templateModelFile, kmerLength,
                                                            baseGamma, middleGamma, leafGamma,
                                                            baseGammaAlpha, baseGammaBeta,
                                                            middleGammaAlpha, middleGammaBeta,
                                                            leafGammaAlpha, leafGammaBeta,
                                                            samplingGridStart, samplingGridEnd, samplingGridLength);
            // "t" selects template-strand rows from the alignment file
            update_nhdp_from_alignment_with_filter(nHdpT, alignments, FALSE, "t");
            fprintf(stderr, "Running Gibbs for template doing %"PRId64"samples, %"PRId64"burn in, %"PRId64"thinning.\n",
                    nbSamples, burnIn, thinning);
            execute_nhdp_gibbs_sampling(nHdpT, nbSamples, burnIn, thinning, verbose);
            finalize_nhdp_distributions(nHdpT);
            fprintf(stderr, "Serializing template to %s...\n", templateHDP);
            serialize_nhdp(nHdpT, templateHDP);
            destroy_nanopore_hdp(nHdpT);
        }
        // Section 2 (explicit): complement strand.
        #pragma omp section
        {
            fprintf(stderr, "Updating Complement HDP from alignments...\n");
            NanoporeHDP *nHdpC = loadNanoporeHdpFromScratch(type, complementModelFile, kmerLength,
                                                            baseGamma, middleGamma, leafGamma,
                                                            baseGammaAlpha, baseGammaBeta,
                                                            middleGammaAlpha, middleGammaBeta,
                                                            leafGammaAlpha, leafGammaBeta,
                                                            samplingGridStart, samplingGridEnd, samplingGridLength);
            // "c" selects complement-strand rows from the alignment file
            update_nhdp_from_alignment_with_filter(nHdpC, alignments, FALSE, "c");
            fprintf(stderr, "Running Gibbs for complement doing %"PRId64"samples, %"PRId64"burn in, %"PRId64"thinning.\n",
                    nbSamples, burnIn, thinning);
            execute_nhdp_gibbs_sampling(nHdpC, nbSamples, burnIn, thinning, verbose);
            finalize_nhdp_distributions(nHdpC);
            fprintf(stderr, "Serializing complement to %s...\n", complementHDP);
            serialize_nhdp(nHdpC, complementHDP);
            destroy_nanopore_hdp(nHdpC);
        }
    }
}
|
visual-effects.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V V IIIII SSSSS U U AAA L %
% V V I SS U U A A L %
% V V I SSS U U AAAAA L %
% V V I SS U U A A L %
% V IIIII SSSSS UUU A A LLLLL %
% %
% EEEEE FFFFF FFFFF EEEEE CCCC TTTTT SSSSS %
% E F F E C T SS %
% EEE FFF FFF EEE C T SSS %
% E F F E C T SS %
% EEEEE F F EEEEE CCCC T SSSSS %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/visual-effects.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d d N o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AddNoiseImage() adds random noise to the image.
%
% The format of the AddNoiseImage method is:
%
% Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
% const double attenuate,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o noise_type: The type of noise: Uniform, Gaussian, Multiplicative,
% Impulse, Laplacian, or Poisson.
%
% o attenuate: attenuate the random distribution.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AddNoiseImage(const Image *image,const NoiseType noise_type,
  const double attenuate,ExceptionInfo *exception)
{
#define AddNoiseImageTag  "AddNoise/Image"

  CacheView
    *image_view,
    *noise_view;

  Image
    *noise_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize noise image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Try the OpenCL-accelerated implementation first; a NULL result means the
    accelerator declined, so fall through to the CPU path below.
  */
  noise_image=AccelerateAddNoiseImage(image,noise_type,attenuate,exception);
  if (noise_image != (Image *) NULL)
    return(noise_image);
#endif
  noise_image=CloneImage(image,0,0,MagickTrue,exception);
  if (noise_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
    {
      noise_image=DestroyImage(noise_image);
      return((Image *) NULL);
    }
  /*
    Add noise in each row.  One random generator is acquired per worker
    thread; each row indexes the set with its OpenMP thread id so rows can be
    processed concurrently without sharing generator state.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  noise_view=AcquireAuthenticCacheView(noise_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /*
    NOTE(review): rows run in parallel only when key == ~0UL -- presumably the
    "no fixed random seed" sentinel, keeping seeded runs deterministic; confirm
    against GetRandomSecretKey().
  */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,noise_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;  /* an earlier row failed; skip remaining work */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait noise_traits=GetPixelChannelTraits(noise_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (noise_traits == UndefinedPixelTrait))
          continue;
        if ((noise_traits & CopyPixelTrait) != 0)
          {
            /*
              Channel is not selected for noise; copy it through unchanged.
            */
            SetPixelChannel(noise_image,channel,p[i],q);
            continue;
          }
        /*
          Perturb the sample with the requested noise distribution and clamp
          the result back into the valid quantum range.
        */
        SetPixelChannel(noise_image,channel,ClampToQuantum(
          GenerateDifferentialNoise(random_info[id],p[i],noise_type,attenuate)),
          q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(noise_image);
    }
    sync=SyncCacheViewAuthenticPixels(noise_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          Progress counter is shared across threads; the atomic keeps the
          increment race-free.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,AddNoiseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  noise_view=DestroyCacheView(noise_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    noise_image=DestroyImage(noise_image);
  return(noise_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l u e S h i f t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlueShiftImage() mutes the colors of the image to simulate a scene at
% nighttime in the moonlight.
%
% The format of the BlueShiftImage method is:
%
% Image *BlueShiftImage(const Image *image,const double factor,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o factor: the shift factor.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlueShiftImage(const Image *image,const double factor,
  ExceptionInfo *exception)
{
#define BlueShiftImageTag  "BlueShift/Image"

  CacheView
    *image_view,
    *shift_view;

  Image
    *shift_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Clone the source and promote it to DirectClass so every pixel of the
    moonlight-simulated result is independently writable.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  shift_image=CloneImage(image,0,0,MagickTrue,exception);
  if (shift_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(shift_image,DirectClass,exception) == MagickFalse)
    {
      shift_image=DestroyImage(shift_image);
      return((Image *) NULL);
    }
  /*
    Mute the colors row by row: each channel is first averaged toward the
    darkest of the three RGB samples, then toward the brightest, each time
    weighted by the shift factor.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  shift_view=AcquireAuthenticCacheView(shift_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,shift_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(shift_view,0,y,shift_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        muted;

      Quantum
        blue,
        brightest,
        darkest,
        green,
        red;

      red=GetPixelRed(image,p);
      green=GetPixelGreen(image,p);
      blue=GetPixelBlue(image,p);
      darkest=red;
      if (green < darkest)
        darkest=green;
      if (blue < darkest)
        darkest=blue;
      brightest=red;
      if (green > brightest)
        brightest=green;
      if (blue > brightest)
        brightest=blue;
      muted.red=0.5*(red+factor*darkest);
      muted.green=0.5*(green+factor*darkest);
      muted.blue=0.5*(blue+factor*darkest);
      muted.red=0.5*(muted.red+factor*brightest);
      muted.green=0.5*(muted.green+factor*brightest);
      muted.blue=0.5*(muted.blue+factor*brightest);
      SetPixelRed(shift_image,ClampToQuantum(muted.red),q);
      SetPixelGreen(shift_image,ClampToQuantum(muted.green),q);
      SetPixelBlue(shift_image,ClampToQuantum(muted.blue),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(shift_image);
    }
    if (SyncCacheViewAuthenticPixels(shift_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlueShiftImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  shift_view=DestroyCacheView(shift_view);
  if (status == MagickFalse)
    shift_image=DestroyImage(shift_image);
  return(shift_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h a r c o a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CharcoalImage() creates a new image that is a copy of an existing one with
% the edge highlighted. It allocates the memory necessary for the new Image
% structure and returns a pointer to the new image.
%
% The format of the CharcoalImage method is:
%
% Image *CharcoalImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CharcoalImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  Image
    *edge_image,
    *sketch_image;

  MagickBooleanType
    status;

  /*
    Charcoal is a pipeline: edge-detect, clamp, blur, then normalize, negate,
    and grayscale the blurred edges to produce the edge-highlighted copy.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  edge_image=EdgeImage(image,radius,exception);
  if (edge_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    The edge image is only an intermediate; disable its alpha trait before
    blurring it.
  */
  edge_image->alpha_trait=UndefinedPixelTrait;
  sketch_image=(Image *) NULL;
  if (ClampImage(edge_image,exception) != MagickFalse)
    sketch_image=BlurImage(edge_image,radius,sigma,exception);
  edge_image=DestroyImage(edge_image);
  if (sketch_image == (Image *) NULL)
    return((Image *) NULL);
  status=NormalizeImage(sketch_image,exception);
  if (status != MagickFalse)
    status=NegateImage(sketch_image,MagickFalse,exception);
  if (status != MagickFalse)
    status=GrayscaleImage(sketch_image,image->intensity,exception);
  if (status == MagickFalse)
    sketch_image=DestroyImage(sketch_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorizeImage() blends the fill color with each pixel in the image.
% A percentage blend is specified with opacity. Control the application
% of different color components by specifying a different percentage for
% each component (e.g. 90/100/10 is 90% red, 100% green, and 10% blue).
%
% The format of the ColorizeImage method is:
%
% Image *ColorizeImage(const Image *image,const char *blend,
% const PixelInfo *colorize,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o blend: A character string indicating the level of blending as a
% percentage.
%
% o colorize: A color value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ColorizeImage(const Image *image,const char *blend,
  const PixelInfo *colorize,ExceptionInfo *exception)
{
#define ColorizeImageTag  "Colorize/Image"
/*
  Linear blend: blend_percentage percent of the fill color, the remainder of
  the original sample.
*/
#define Colorize(pixel,blend_percentage,colorize) \
  (((pixel)*(100.0-(blend_percentage))+(colorize)*(blend_percentage))/100.0)

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  Image
    *colorize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  PixelInfo
    blend_percentage;

  ssize_t
    y;

  /*
    Allocate colorized image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  colorize_image=CloneImage(image,0,0,MagickTrue,exception);
  if (colorize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(colorize_image,DirectClass,exception) == MagickFalse)
    {
      colorize_image=DestroyImage(colorize_image);
      return((Image *) NULL);
    }
  /*
    NOTE(review): promotes the clone to sRGB when either the image or the
    fill color is gray -- presumably so the blend operates on full RGB
    channels; confirm.
  */
  if ((IsGrayColorspace(colorize_image->colorspace) != MagickFalse) ||
      (IsPixelInfoGray(colorize) != MagickFalse))
    (void) SetImageColorspace(colorize_image,sRGBColorspace,exception);
  /*
    Give the clone an alpha channel when the fill color carries one.
  */
  if ((colorize_image->alpha_trait == UndefinedPixelTrait) &&
      (colorize->alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(colorize_image,OpaqueAlpha,exception);
  if (blend == (const char *) NULL)
    return(colorize_image);  /* no blend geometry: return the plain clone */
  /*
    Parse the blend geometry into per-channel percentages: rho seeds red,
    green, blue, and black; sigma/xi/psi override green/blue/alpha; alpha
    defaults to TransparentAlpha when not given explicitly.
  */
  GetPixelInfo(colorize_image,&blend_percentage);
  flags=ParseGeometry(blend,&geometry_info);
  blend_percentage.red=geometry_info.rho;
  blend_percentage.green=geometry_info.rho;
  blend_percentage.blue=geometry_info.rho;
  blend_percentage.black=geometry_info.rho;
  blend_percentage.alpha=(MagickRealType) TransparentAlpha;
  if ((flags & SigmaValue) != 0)
    blend_percentage.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    blend_percentage.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    blend_percentage.alpha=geometry_info.psi;
  if (blend_percentage.colorspace == CMYKColorspace)
    {
      /*
        For CMYK the fourth value (psi) is the black channel and alpha moves
        to the fifth value (chi).
      */
      if ((flags & PsiValue) != 0)
        blend_percentage.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        blend_percentage.alpha=geometry_info.chi;
    }
  /*
    Colorize DirectClass image.  The blend is applied in place on the clone
    through a single authentic cache view.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(colorize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(colorize_image,colorize_image,colorize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) colorize_image->rows; y++)
  {
    MagickBooleanType
      sync;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,colorize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) colorize_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(colorize_image); i++)
      {
        PixelTrait traits = GetPixelChannelTraits(colorize_image,
          (PixelChannel) i);
        if (traits == UndefinedPixelTrait)
          continue;
        if ((traits & CopyPixelTrait) != 0)
          continue;  /* channel not selected: keep the clone's value */
        SetPixelChannel(colorize_image,(PixelChannel) i,ClampToQuantum(
          Colorize(q[i],GetPixelInfoChannel(&blend_percentage,(PixelChannel) i),
          GetPixelInfoChannel(colorize,(PixelChannel) i))),q);
      }
      q+=GetPixelChannels(colorize_image);
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorizeImageTag,progress,
          colorize_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    colorize_image=DestroyImage(colorize_image);
  return(colorize_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r M a t r i x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorMatrixImage() applies color transformation to an image. This method
% permits saturation changes, hue rotation, luminance to alpha, and various
% other effects. Although variable-sized transformation matrices can be used,
% typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA
% (or RGBA with offsets). The matrix is similar to those used by Adobe Flash
% except offsets are in column 6 rather than 5 (in support of CMYKA images)
% and offsets are normalized (divide Flash offset by 255).
%
% The format of the ColorMatrixImage method is:
%
% Image *ColorMatrixImage(const Image *image,
% const KernelInfo *color_matrix,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_matrix: the color matrix.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* FUTURE: modify to make use of a MagickMatrix Multiply function
That should be provided in "matrix.c"
(ASIDE: actually distorts should do this too but currently doesn't)
*/
MagickExport Image *ColorMatrixImage(const Image *image,
  const KernelInfo *color_matrix,ExceptionInfo *exception)
{
#define ColorMatrixImageTag  "ColorMatrix/Image"

  CacheView
    *color_view,
    *image_view;

  double
    ColorMatrix[6][6] =
    {
      { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 },
      { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }
    };

  Image
    *color_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    u,
    v,
    y;

  /*
    Map given color_matrix, into a 6x6 matrix RGBKA and a constant
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Copy the user kernel row-major into the top-left of the 6x6 identity;
    entries beyond row/column 6 are skipped (but still advance i).
  */
  i=0;
  for (v=0; v < (ssize_t) color_matrix->height; v++)
    for (u=0; u < (ssize_t) color_matrix->width; u++)
    {
      if ((v < 6) && (u < 6))
        ColorMatrix[v][u]=color_matrix->values[i];
      i++;
    }
  /*
    Initialize color image.
  */
  color_image=CloneImage(image,0,0,MagickTrue,exception);
  if (color_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(color_image,DirectClass,exception) == MagickFalse)
    {
      color_image=DestroyImage(color_image);
      return((Image *) NULL);
    }
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        *message;

      /*
        Log the effective 6x6 matrix, one row per log line.
      */
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        " ColorMatrix image with color matrix:");
      message=AcquireString("");
      for (v=0; v < 6; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < 6; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",
            ColorMatrix[v][u]);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  /*
    Apply the ColorMatrix to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  color_view=AcquireAuthenticCacheView(color_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,color_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(color_view,0,y,color_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        h;

      size_t
        height;

      GetPixelInfoPixel(image,p,&pixel);
      /*
        Each output channel h is a weighted sum of R, G, B (plus K for CMYK
        images and A when the image has alpha) and a constant offset in
        column 5, scaled by QuantumRange.
      */
      height=color_matrix->height > 6 ? 6UL : color_matrix->height;
      for (h=0; h < (ssize_t) height; h++)
      {
        double
          sum;

        sum=ColorMatrix[h][0]*GetPixelRed(image,p)+ColorMatrix[h][1]*
          GetPixelGreen(image,p)+ColorMatrix[h][2]*GetPixelBlue(image,p);
        if (image->colorspace == CMYKColorspace)
          sum+=ColorMatrix[h][3]*GetPixelBlack(image,p);
        if (image->alpha_trait != UndefinedPixelTrait)
          sum+=ColorMatrix[h][4]*GetPixelAlpha(image,p);
        sum+=QuantumRange*ColorMatrix[h][5];
        switch (h)
        {
          case 0: pixel.red=sum; break;
          case 1: pixel.green=sum; break;
          case 2: pixel.blue=sum; break;
          case 3: pixel.black=sum; break;
          case 4: pixel.alpha=sum; break;
          default: break;
        }
      }
      SetPixelViaPixelInfo(color_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(color_image);
    }
    if (SyncCacheViewAuthenticPixels(color_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ColorMatrixImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  color_view=DestroyCacheView(color_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    color_image=DestroyImage(color_image);
  return(color_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I m p l o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ImplodeImage() creates a new image that is a copy of an existing
% one with the image pixels "implode" by the specified percentage. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ImplodeImage method is:
%
% Image *ImplodeImage(const Image *image,const double amount,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o implode_image: Method ImplodeImage returns a pointer to the image
% after it is imploded. A null image is returned if there is a memory
% shortage.
%
% o image: the image.
%
% o amount: Define the extent of the implosion.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ImplodeImage(const Image *image,const double amount,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ImplodeImageTag  "Implode/Image"

  CacheView
    *canvas_view,
    *implode_view,
    *interpolate_view;

  double
    radius;

  Image
    *canvas_image,
    *implode_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize implode image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    NOTE(review): when the canvas has no alpha channel but its background
    color is not opaque, an opaque alpha channel is added first -- presumably
    so interpolation near the edges composes correctly; confirm.
  */
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlphaChannel(canvas_image,OpaqueAlphaChannel,exception);
  implode_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (implode_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(implode_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      implode_image=DestroyImage(implode_image);
      return((Image *) NULL);
    }
  /*
    Compute scaling factor: the shorter dimension is stretched so distances
    are measured in a circular domain whose radius is half the longer side.
  */
  scale.x=1.0;
  scale.y=1.0;
  center.x=0.5*canvas_image->columns;
  center.y=0.5*canvas_image->rows;
  radius=center.x;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns*PerceptibleReciprocal((double)
      canvas_image->rows);
  else
    if (canvas_image->columns < canvas_image->rows)
      {
        scale.x=(double) canvas_image->rows*PerceptibleReciprocal((double)
          canvas_image->columns);
        radius=center.y;
      }
  /*
    Implode image.  A separate virtual view of the canvas is used for
    interpolation so reads do not interfere with the row reads below.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(canvas_image,exception);
  implode_view=AcquireAuthenticCacheView(implode_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,implode_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(implode_view,0,y,implode_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        /*
          Outside the effect circle: copy the source pixel through unchanged.
        */
        for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
        {
          PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
          PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
          PixelTrait implode_traits = GetPixelChannelTraits(implode_image,
            channel);
          if ((traits == UndefinedPixelTrait) ||
              (implode_traits == UndefinedPixelTrait))
            continue;
          SetPixelChannel(implode_image,channel,p[i],q);
        }
      else
        {
          double
            factor;

          /*
            Implode the pixel: sample the canvas at the source coordinate
            pulled toward (amount > 0) or pushed away from (amount < 0) the
            center by the sine-shaped factor; the factor is 1 (no motion) at
            the exact center to avoid a singularity.
          */
          factor=1.0;
          if (distance > 0.0)
            factor=pow(sin(MagickPI*sqrt((double) distance)*
              PerceptibleReciprocal(radius)/2),-amount);
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            implode_image,method,(double) (factor*delta.x*
            PerceptibleReciprocal(scale.x)+center.x),(double) (factor*
            delta.y*PerceptibleReciprocal(scale.y)+center.y),q,exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(implode_image);
    }
    if (SyncCacheViewAuthenticPixels(implode_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,ImplodeImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  implode_view=DestroyCacheView(implode_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    implode_image=DestroyImage(implode_image);
  return(implode_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The MorphImages() method requires a minimum of two images. The first
% image is transformed into the second by a number of intervening images
% as specified by frames.
%
% The format of the MorphImages method is:
%
% Image *MorphImages(const Image *image,const size_t number_frames,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o number_frames: Define the number of in-between image to generate.
% The more in-between frames, the smoother the morph.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MorphImages(const Image *image,const size_t number_frames,
ExceptionInfo *exception)
{
#define MorphImageTag "Morph/Image"
double
alpha,
beta;
Image
*morph_image,
*morph_images;
MagickBooleanType
status;
MagickOffsetType
scene;
const Image
*next;
ssize_t
n;
ssize_t
y;
/*
Clone first frame in sequence.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
morph_images=CloneImage(image,0,0,MagickTrue,exception);
if (morph_images == (Image *) NULL)
return((Image *) NULL);
if (GetNextImageInList(image) == (Image *) NULL)
{
/*
Morph single image.
*/
for (n=1; n < (ssize_t) number_frames; n++)
{
morph_image=CloneImage(image,0,0,MagickTrue,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphImageTag,(MagickOffsetType) n,
number_frames);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(GetFirstImageInList(morph_images));
}
/*
Morph image sequence.
*/
status=MagickTrue;
scene=0;
next=image;
for ( ; GetNextImageInList(next) != (Image *) NULL; next=GetNextImageInList(next))
{
for (n=0; n < (ssize_t) number_frames; n++)
{
CacheView
*image_view,
*morph_view;
beta=(double) (n+1.0)/(double) (number_frames+1.0);
alpha=1.0-beta;
morph_image=ResizeImage(next,(size_t) (alpha*next->columns+beta*
GetNextImageInList(next)->columns+0.5),(size_t) (alpha*next->rows+beta*
GetNextImageInList(next)->rows+0.5),next->filter,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
status=SetImageStorageClass(morph_image,DirectClass,exception);
if (status == MagickFalse)
{
morph_image=DestroyImage(morph_image);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
morph_images=GetLastImageInList(morph_images);
morph_image=ResizeImage(GetNextImageInList(next),morph_images->columns,
morph_images->rows,GetNextImageInList(next)->filter,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
image_view=AcquireVirtualCacheView(morph_image,exception);
morph_view=AcquireAuthenticCacheView(morph_images,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(morph_image,morph_image,morph_image->rows,1)
#endif
for (y=0; y < (ssize_t) morph_images->rows; y++)
{
MagickBooleanType
sync;
const Quantum
*magick_restrict p;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,morph_image->columns,1,
exception);
q=GetCacheViewAuthenticPixels(morph_view,0,y,morph_images->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) morph_images->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(morph_image); i++)
{
PixelChannel channel = GetPixelChannelChannel(morph_image,i);
PixelTrait traits = GetPixelChannelTraits(morph_image,channel);
PixelTrait morph_traits=GetPixelChannelTraits(morph_images,channel);
if ((traits == UndefinedPixelTrait) ||
(morph_traits == UndefinedPixelTrait))
continue;
if ((morph_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(morph_image,channel,p[i],q);
continue;
}
SetPixelChannel(morph_image,channel,ClampToQuantum(alpha*
GetPixelChannel(morph_images,channel,q)+beta*p[i]),q);
}
p+=GetPixelChannels(morph_image);
q+=GetPixelChannels(morph_images);
}
sync=SyncCacheViewAuthenticPixels(morph_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
morph_view=DestroyCacheView(morph_view);
image_view=DestroyCacheView(image_view);
morph_image=DestroyImage(morph_image);
}
if (n < (ssize_t) number_frames)
break;
/*
Clone last frame in sequence.
*/
morph_image=CloneImage(GetNextImageInList(next),0,0,MagickTrue,exception);
if (morph_image == (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
AppendImageToList(&morph_images,morph_image);
morph_images=GetLastImageInList(morph_images);
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,MorphImageTag,scene,
GetImageListLength(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
scene++;
}
if (GetNextImageInList(next) != (Image *) NULL)
{
morph_images=DestroyImageList(morph_images);
return((Image *) NULL);
}
return(GetFirstImageInList(morph_images));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P l a s m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PlasmaImage() initializes an image with plasma fractal values. The image
% must be initialized with a base color and the random number generator
% seeded before this method is called.
%
% The format of the PlasmaImage method is:
%
% MagickBooleanType PlasmaImage(Image *image,const SegmentInfo *segment,
% size_t attenuate,size_t depth,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o segment: Define the region to apply plasma fractals values.
%
% o attenuate: Define the plasma attenuation factor.
%
% o depth: Limit the plasma recursion depth.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PlasmaPixel(RandomInfo *magick_restrict random_info,
  const double pixel,const double noise)
{
  /*
    Perturb a pixel value by uniform random noise centered on zero: the
    result is pixel + noise*U(0,1) - noise/2, clamped to the Quantum range.
  */
  MagickRealType
    perturbed;
  perturbed=pixel+noise*GetPseudoRandomValue(random_info)-noise/2.0;
  return(ClampToQuantum(perturbed));
}
/*
  PlasmaImageProxy() renders plasma fractal noise into the rectangle of
  `image' bounded by `segment'.  While depth > 0 the rectangle is split into
  four quadrants at its midpoint and the routine recurses on each quadrant
  (attenuate grows by one per level, so deeper recursion adds weaker noise).
  At depth 0 it writes the left, right, bottom, top, and middle boundary
  pixels of the rectangle: each written pixel is the average of two source
  pixels (read through u_view/v_view) plus uniform noise scaled by
  QuantumRange/(2*attenuate).
*/
static MagickBooleanType PlasmaImageProxy(Image *image,CacheView *image_view,
  CacheView *u_view,CacheView *v_view,RandomInfo *magick_restrict random_info,
  const SegmentInfo *magick_restrict segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  double
    plasma;
  MagickStatusType
    status;
  const Quantum
    *magick_restrict u,
    *magick_restrict v;
  Quantum
    *magick_restrict q;
  ssize_t
    i;
  ssize_t
    x,
    x_mid,
    y,
    y_mid;
  /*
    A zero-area segment requires no work.
  */
  if ((fabs(segment->x2-segment->x1) < MagickEpsilon) &&
      (fabs(segment->y2-segment->y1) < MagickEpsilon))
    return(MagickTrue);
  if (depth != 0)
    {
      SegmentInfo
        local_info;
      /*
        Divide the area into quadrants and recurse.
      */
      depth--;
      attenuate++;
      x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
      y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
      /*
        Upper-left quadrant.
      */
      local_info=(*segment);
      local_info.x2=(double) x_mid;
      local_info.y2=(double) y_mid;
      status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /*
        Lower-left quadrant.
      */
      local_info=(*segment);
      local_info.y1=(double) y_mid;
      local_info.x2=(double) x_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /*
        Upper-right quadrant.
      */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y2=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      /*
        Lower-right quadrant.
      */
      local_info=(*segment);
      local_info.x1=(double) x_mid;
      local_info.y1=(double) y_mid;
      status&=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,
        &local_info,attenuate,depth,exception);
      return(status == 0 ? MagickFalse : MagickTrue);
    }
  x_mid=CastDoubleToLong(ceil((segment->x1+segment->x2)/2-0.5));
  y_mid=CastDoubleToLong(ceil((segment->y1+segment->y2)/2-0.5));
  /*
    Segment has collapsed to a single point; nothing left to interpolate.
  */
  if ((fabs(segment->x1-x_mid) < MagickEpsilon) &&
      (fabs(segment->x2-x_mid) < MagickEpsilon) &&
      (fabs(segment->y1-y_mid) < MagickEpsilon) &&
      (fabs(segment->y2-y_mid) < MagickEpsilon))
    return(MagickFalse);
  /*
    Average pixels and apply plasma.
  */
  status=MagickTrue;
  plasma=(double) QuantumRange/(2.0*attenuate);
  if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
      (fabs(segment->x2-x_mid) >= MagickEpsilon))
    {
      /*
        Left pixel.
      */
      x=CastDoubleToLong(ceil(segment->x1-0.5));
      u=GetCacheViewVirtualPixels(u_view,x,CastDoubleToLong(ceil(
        segment->y1-0.5)),1,1,exception);
      v=GetCacheViewVirtualPixels(v_view,x,CastDoubleToLong(ceil(
        segment->y2-0.5)),1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
      /*
        NOTE(review): this NULL check returns MagickTrue while the "Right
        pixel" branch below returns MagickFalse in the same situation --
        confirm whether the asymmetry is intentional.
      */
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        /* blend the two source samples and perturb with plasma noise */
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
      if (fabs(segment->x1-segment->x2) >= MagickEpsilon)
        {
          /*
            Right pixel.
          */
          x=CastDoubleToLong(ceil(segment->x2-0.5));
          u=GetCacheViewVirtualPixels(u_view,x,CastDoubleToLong(ceil(
            segment->y1-0.5)),1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,x,CastDoubleToLong(ceil(
            segment->y2-0.5)),1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x,y_mid,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickFalse);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->y1-y_mid) >= MagickEpsilon) ||
      (fabs(segment->y2-y_mid) >= MagickEpsilon))
    {
      /*
        NOTE(review): this inner condition mixes an x test with a y test
        (x1 vs x_mid, y2 vs y_mid) unlike the symmetric tests elsewhere --
        verify against the intended quadrant logic.
      */
      if ((fabs(segment->x1-x_mid) >= MagickEpsilon) ||
          (fabs(segment->y2-y_mid) >= MagickEpsilon))
        {
          /*
            Bottom pixel.
          */
          y=CastDoubleToLong(ceil(segment->y2-0.5));
          u=GetCacheViewVirtualPixels(u_view,CastDoubleToLong(ceil(
            segment->x1-0.5)),y,1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,CastDoubleToLong(ceil(
            segment->x2-0.5)),y,1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
      if (fabs(segment->y1-segment->y2) >= MagickEpsilon)
        {
          /*
            Top pixel.
          */
          y=CastDoubleToLong(ceil(segment->y1-0.5));
          u=GetCacheViewVirtualPixels(u_view,CastDoubleToLong(ceil(
            segment->x1-0.5)),y,1,1,exception);
          v=GetCacheViewVirtualPixels(v_view,CastDoubleToLong(ceil(
            segment->x2-0.5)),y,1,1,exception);
          q=QueueCacheViewAuthenticPixels(image_view,x_mid,y,1,1,exception);
          if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
              (q == (Quantum *) NULL))
            return(MagickTrue);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if (traits == UndefinedPixelTrait)
              continue;
            q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
          }
          status=SyncCacheViewAuthenticPixels(image_view,exception);
        }
    }
  if ((fabs(segment->x1-segment->x2) >= MagickEpsilon) ||
      (fabs(segment->y1-segment->y2) >= MagickEpsilon))
    {
      /*
        Middle pixel: blend the segment's two diagonal corners.
      */
      x=CastDoubleToLong(ceil(segment->x1-0.5));
      y=CastDoubleToLong(ceil(segment->y1-0.5));
      u=GetCacheViewVirtualPixels(u_view,x,y,1,1,exception);
      x=CastDoubleToLong(ceil(segment->x2-0.5));
      y=CastDoubleToLong(ceil(segment->y2-0.5));
      v=GetCacheViewVirtualPixels(v_view,x,y,1,1,exception);
      q=QueueCacheViewAuthenticPixels(image_view,x_mid,y_mid,1,1,exception);
      if ((u == (const Quantum *) NULL) || (v == (const Quantum *) NULL) ||
          (q == (Quantum *) NULL))
        return(MagickTrue);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PlasmaPixel(random_info,((double) u[i]+v[i])/2.0,plasma);
      }
      status=SyncCacheViewAuthenticPixels(image_view,exception);
    }
  /*
    Segments smaller than 3x3 are finished leaves and report their sync
    status; larger segments return MagickFalse (more subdivision possible).
  */
  if ((fabs(segment->x2-segment->x1) < 3.0) &&
      (fabs(segment->y2-segment->y1) < 3.0))
    return(status == 0 ? MagickFalse : MagickTrue);
  return(MagickFalse);
}
MagickExport MagickBooleanType PlasmaImage(Image *image,
  const SegmentInfo *segment,size_t attenuate,size_t depth,
  ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *u_view,
    *v_view;
  MagickBooleanType
    status;
  RandomInfo
    *random_info;
  /*
    Validate arguments: check the signature before any other member access,
    and emit the trace event only once (the previous code logged it twice).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    One authentic view for writes, two virtual views for the neighboring
    source samples; recurse over the requested segment.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
  u_view=AcquireVirtualCacheView(image,exception);
  v_view=AcquireVirtualCacheView(image,exception);
  random_info=AcquireRandomInfo();
  status=PlasmaImageProxy(image,image_view,u_view,v_view,random_info,segment,
    attenuate,depth,exception);
  random_info=DestroyRandomInfo(random_info);
  v_view=DestroyCacheView(v_view);
  u_view=DestroyCacheView(u_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l a r o i d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolaroidImage() simulates a Polaroid picture.
%
% The format of the PolaroidImage method is:
%
% Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
% const char *caption,const double angle,
%      const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o caption: the Polaroid caption.
%
% o angle: Apply the effect along this angle.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  PolaroidImage() builds the Polaroid effect as a pipeline: render an
  optional caption strip, composite image + caption onto a bordered
  "picture", bend it with WaveImage() (rotated 90 degrees so the wave runs
  vertically), add a blurred shadow, and finally rotate by `angle' and trim.
  Every intermediate image is destroyed as soon as its successor is created;
  each failure path releases the current owner and returns NULL.
*/
MagickExport Image *PolaroidImage(const Image *image,const DrawInfo *draw_info,
  const char *caption,const double angle,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
  Image
    *bend_image,
    *caption_image,
    *flop_image,
    *picture_image,
    *polaroid_image,
    *rotate_image,
    *trim_image;
  size_t
    height;
  ssize_t
    quantum;
  /*
    Simulate a Polaroid picture.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* border width: 1/25th of the longest side, but at least 10 pixels */
  quantum=(ssize_t) MagickMax(MagickMax((double) image->columns,(double)
    image->rows)/25.0,10.0);
  height=image->rows+2*quantum;
  caption_image=(Image *) NULL;
  if (caption != (const char *) NULL)
    {
      char
        *text;
      /*
        Generate caption image.
      */
      caption_image=CloneImage(image,image->columns,1,MagickTrue,exception);
      if (caption_image == (Image *) NULL)
        return((Image *) NULL);
      text=InterpretImageProperties((ImageInfo *) NULL,(Image *) image,caption,
        exception);
      if (text != (char *) NULL)
        {
          char
            geometry[MagickPathExtent];
          DrawInfo
            *annotate_info;
          MagickBooleanType
            status;
          ssize_t
            count;
          TypeMetric
            metrics;
          annotate_info=CloneDrawInfo((const ImageInfo *) NULL,draw_info);
          (void) CloneString(&annotate_info->text,text);
          /* word-wrap the caption and measure it; count = number of lines */
          count=FormatMagickCaption(caption_image,annotate_info,MagickTrue,
            &metrics,&text,exception);
          status=SetImageExtent(caption_image,image->columns,(size_t)
            ((count+1)*(metrics.ascent-metrics.descent)+0.5),exception);
          if (status == MagickFalse)
            caption_image=DestroyImage(caption_image);
          else
            {
              caption_image->background_color=image->border_color;
              (void) SetImageBackgroundColor(caption_image,exception);
              (void) CloneString(&annotate_info->text,text);
              (void) FormatLocaleString(geometry,MagickPathExtent,"+0+%.20g",
                metrics.ascent);
              if (annotate_info->gravity == UndefinedGravity)
                (void) CloneString(&annotate_info->geometry,AcquireString(
                  geometry));
              (void) AnnotateImage(caption_image,annotate_info,exception);
              height+=caption_image->rows;
            }
          annotate_info=DestroyDrawInfo(annotate_info);
          text=DestroyString(text);
        }
    }
  /*
    Compose the "picture": source image plus border, caption underneath.
  */
  picture_image=CloneImage(image,image->columns+2*quantum,height,MagickTrue,
    exception);
  if (picture_image == (Image *) NULL)
    {
      if (caption_image != (Image *) NULL)
        caption_image=DestroyImage(caption_image);
      return((Image *) NULL);
    }
  picture_image->background_color=image->border_color;
  (void) SetImageBackgroundColor(picture_image,exception);
  (void) CompositeImage(picture_image,image,OverCompositeOp,MagickTrue,quantum,
    quantum,exception);
  if (caption_image != (Image *) NULL)
    {
      (void) CompositeImage(picture_image,caption_image,OverCompositeOp,
        MagickTrue,quantum,(ssize_t) (image->rows+3*quantum/2),exception);
      caption_image=DestroyImage(caption_image);
    }
  (void) QueryColorCompliance("none",AllCompliance,
    &picture_image->background_color,exception);
  (void) SetImageAlphaChannel(picture_image,OpaqueAlphaChannel,exception);
  /*
    Bend the picture: rotate 90, wave, rotate back.
  */
  rotate_image=RotateImage(picture_image,90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  bend_image=WaveImage(picture_image,0.01*picture_image->rows,2.0*
    picture_image->columns,method,exception);
  picture_image=DestroyImage(picture_image);
  if (bend_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=bend_image;
  rotate_image=RotateImage(picture_image,-90.0,exception);
  picture_image=DestroyImage(picture_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  picture_image=rotate_image;
  picture_image->background_color=image->background_color;
  /*
    Drop shadow behind a flopped copy, then composite the picture on top.
    (DestroyImage() returns NULL, so `return(picture_image)' below yields
    NULL, matching the other failure paths.)
  */
  polaroid_image=ShadowImage(picture_image,80.0,2.0,quantum/3,quantum/3,
    exception);
  if (polaroid_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  flop_image=FlopImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (flop_image == (Image *) NULL)
    {
      picture_image=DestroyImage(picture_image);
      return(picture_image);
    }
  polaroid_image=flop_image;
  (void) CompositeImage(polaroid_image,picture_image,OverCompositeOp,
    MagickTrue,(ssize_t) (-0.01*picture_image->columns/2.0),0L,exception);
  picture_image=DestroyImage(picture_image);
  (void) QueryColorCompliance("none",AllCompliance,
    &polaroid_image->background_color,exception);
  /*
    Final tilt and trim.
  */
  rotate_image=RotateImage(polaroid_image,angle,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=rotate_image;
  trim_image=TrimImage(polaroid_image,exception);
  polaroid_image=DestroyImage(polaroid_image);
  if (trim_image == (Image *) NULL)
    return((Image *) NULL);
  polaroid_image=trim_image;
  return(polaroid_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p i a T o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SepiaToneImage() applies a special effect to the image, similar to the
% effect achieved in a photo darkroom by sepia toning. Threshold ranges from
% 0 to QuantumRange and is a measure of the extent of the sepia toning. A
% threshold of 80% is a good starting point for a reasonable tone.
%
% The format of the SepiaToneImage method is:
%
% Image *SepiaToneImage(const Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: the tone threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SepiaToneImage() clones the image and remaps each pixel's RGB from its
  grayscale intensity: red saturates above `threshold', green above
  7*threshold/6, blue is darkened by threshold/6, and green/blue are floored
  at threshold/7.  Rows are processed in parallel; the result is normalized
  and contrast-enhanced before returning.  Returns NULL on failure.
*/
MagickExport Image *SepiaToneImage(const Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define SepiaToneImageTag  "SepiaTone/Image"
  CacheView
    *image_view,
    *sepia_view;
  Image
    *sepia_image;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Initialize sepia-toned image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  sepia_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sepia_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(sepia_image,DirectClass,exception) == MagickFalse)
    {
      sepia_image=DestroyImage(sepia_image);
      return((Image *) NULL);
    }
  /*
    Tone each row of the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  sepia_view=AcquireAuthenticCacheView(sepia_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,sepia_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;
    ssize_t
      x;
    Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(sepia_view,0,y,sepia_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity,
        tone;
      intensity=GetPixelIntensity(image,p);
      /* red: clip to white above threshold, else shift up */
      tone=intensity > threshold ? (double) QuantumRange : intensity+
        (double) QuantumRange-threshold;
      SetPixelRed(sepia_image,ClampToQuantum(tone),q);
      /* green: same mapping with a higher knee at 7*threshold/6 */
      tone=intensity > (7.0*threshold/6.0) ? (double) QuantumRange :
        intensity+(double) QuantumRange-7.0*threshold/6.0;
      SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      /* blue: darken by threshold/6, clipping at black */
      tone=intensity < (threshold/6.0) ? 0 : intensity-threshold/6.0;
      SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      /*
        Floor green/blue at threshold/7.  NOTE(review): these reads pass
        `image' (not sepia_image) with the destination pointer q -- this
        relies on the clone sharing the source channel map; confirm.
      */
      tone=threshold/7.0;
      if ((double) GetPixelGreen(image,q) < tone)
        SetPixelGreen(sepia_image,ClampToQuantum(tone),q);
      if ((double) GetPixelBlue(image,q) < tone)
        SetPixelBlue(sepia_image,ClampToQuantum(tone),q);
      SetPixelAlpha(sepia_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(sepia_image);
    }
    if (SyncCacheViewAuthenticPixels(sepia_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SepiaToneImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  sepia_view=DestroyCacheView(sepia_view);
  image_view=DestroyCacheView(image_view);
  (void) NormalizeImage(sepia_image,exception);
  (void) ContrastImage(sepia_image,MagickTrue,exception);
  if (status == MagickFalse)
    sepia_image=DestroyImage(sepia_image);
  return(sepia_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a d o w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShadowImage() simulates a shadow from the specified image and returns it.
%
% The format of the ShadowImage method is:
%
% Image *ShadowImage(const Image *image,const double alpha,
% const double sigma,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: percentage transparency.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x_offset: the shadow x-offset.
%
% o y_offset: the shadow y-offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ShadowImage() builds a soft shadow silhouette of `image': clone the image,
  add a transparent border of 2*sigma pixels, replace every pixel with the
  background color (keeping the source alpha scaled by `alpha' percent),
  blur only the alpha channel, and offset the result's page geometry by
  (x_offset, y_offset).  Returns the shadow image or NULL on failure; the
  caller composites it beneath the original.
*/
MagickExport Image *ShadowImage(const Image *image,const double alpha,
  const double sigma,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define ShadowImageTag  "Shadow/Image"
  CacheView
    *image_view;
  ChannelType
    channel_mask;
  Image
    *border_image,
    *clone_image,
    *shadow_image;
  MagickBooleanType
    status;
  PixelInfo
    background_color;
  RectangleInfo
    border_info;
  ssize_t
    y;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(clone_image,sRGBColorspace,exception);
  (void) SetImageVirtualPixelMethod(clone_image,EdgeVirtualPixelMethod,
    exception);
  /*
    Pad by 2*sigma on each side so the blur has room to feather out.
  */
  border_info.width=(size_t) floor(2.0*sigma+0.5);
  border_info.height=(size_t) floor(2.0*sigma+0.5);
  border_info.x=0;
  border_info.y=0;
  (void) QueryColorCompliance("none",AllCompliance,&clone_image->border_color,
    exception);
  clone_image->alpha_trait=BlendPixelTrait;
  border_image=BorderImage(clone_image,&border_info,OverCompositeOp,exception);
  clone_image=DestroyImage(clone_image);
  if (border_image == (Image *) NULL)
    return((Image *) NULL);
  if (border_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(border_image,OpaqueAlphaChannel,exception);
  /*
    Shadow image: paint every pixel with the background color, carrying the
    source alpha scaled to `alpha' percent.
  */
  status=MagickTrue;
  background_color=border_image->background_color;
  background_color.alpha_trait=BlendPixelTrait;
  image_view=AcquireAuthenticCacheView(border_image,exception);
  for (y=0; y < (ssize_t) border_image->rows; y++)
  {
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,border_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) border_image->columns; x++)
    {
      if (border_image->alpha_trait != UndefinedPixelTrait)
        background_color.alpha=GetPixelAlpha(border_image,q)*alpha/100.0;
      SetPixelViaPixelInfo(border_image,&background_color,q);
      q+=GetPixelChannels(border_image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      border_image=DestroyImage(border_image);
      return((Image *) NULL);
    }
  /*
    Blur only the alpha channel, then restore the original channel mask.
  */
  channel_mask=SetImageChannelMask(border_image,AlphaChannel);
  shadow_image=BlurImage(border_image,0.0,sigma,exception);
  border_image=DestroyImage(border_image);
  if (shadow_image == (Image *) NULL)
    return((Image *) NULL);
  (void) SetPixelChannelMask(shadow_image,channel_mask);
  /*
    Offset the page geometry so the shadow lands at (x_offset, y_offset)
    relative to the original, compensating for the added border.
  */
  if (shadow_image->page.width == 0)
    shadow_image->page.width=shadow_image->columns;
  if (shadow_image->page.height == 0)
    shadow_image->page.height=shadow_image->rows;
  shadow_image->page.width+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.height+=y_offset-(ssize_t) border_info.height;
  shadow_image->page.x+=x_offset-(ssize_t) border_info.width;
  shadow_image->page.y+=y_offset-(ssize_t) border_info.height;
  return(shadow_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S k e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SketchImage() simulates a pencil sketch. We convolve the image with a
% Gaussian operator of the given radius and standard deviation (sigma). For
% reasonable results, radius should be larger than sigma. Use a radius of 0
% and SketchImage() selects a suitable radius for you. Angle gives the angle
% of the sketch.
%
% The format of the SketchImage method is:
%
% Image *SketchImage(const Image *image,const double radius,
% const double sigma,const double angle,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the Gaussian, in pixels, not counting the
% center pixel.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o angle: apply the effect along this angle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SketchImage() simulates a pencil sketch: fill a double-size clone with
  per-pixel random gray, motion-blur it along `angle', edge-detect to make a
  "dodge" layer (clamped, normalized, negated, and halved back to the source
  size), color-dodge the original through it, then blend a 20/80 mix of the
  original back on top.  Returns the sketch image or NULL on failure.
*/
MagickExport Image *SketchImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
  CacheView
    *random_view;
  Image
    *blend_image,
    *blur_image,
    *dodge_image,
    *random_image,
    *sketch_image;
  MagickBooleanType
    status;
  RandomInfo
    **magick_restrict random_info;
  ssize_t
    y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif
  /*
    Sketch image.
  */
  random_image=CloneImage(image,image->columns << 1,image->rows << 1,
    MagickTrue,exception);
  if (random_image == (Image *) NULL)
    return((Image *) NULL);
  status=MagickTrue;
  random_info=AcquireRandomInfoThreadSet();
  random_view=AcquireAuthenticCacheView(random_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* parallelize only when the RNG is not seeded (key == ~0UL) so results
     stay reproducible under a fixed seed */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(random_image,random_image,random_image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) random_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();
    Quantum
      *magick_restrict q;
    ssize_t
      x;
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(random_view,0,y,random_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) random_image->columns; x++)
    {
      double
        value;
      ssize_t
        i;
      /* one random value per pixel, replicated across all channels */
      value=GetPseudoRandomValue(random_info[id]);
      for (i=0; i < (ssize_t) GetPixelChannels(random_image); i++)
      {
        /*
          NOTE(review): the channel map is queried via `image' while
          iterating random_image's channels -- random_image is a clone of
          image, so the maps should agree; confirm.
        */
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=ClampToQuantum(QuantumRange*value);
      }
      q+=GetPixelChannels(random_image);
    }
    if (SyncCacheViewAuthenticPixels(random_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  random_view=DestroyCacheView(random_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    {
      random_image=DestroyImage(random_image);
      return(random_image);
    }
  /*
    Build the dodge layer from the blurred noise.
  */
  blur_image=MotionBlurImage(random_image,radius,sigma,angle,exception);
  random_image=DestroyImage(random_image);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  dodge_image=EdgeImage(blur_image,radius,exception);
  blur_image=DestroyImage(blur_image);
  if (dodge_image == (Image *) NULL)
    return((Image *) NULL);
  status=ClampImage(dodge_image,exception);
  if (status != MagickFalse)
    status=NormalizeImage(dodge_image,exception);
  if (status != MagickFalse)
    status=NegateImage(dodge_image,MagickFalse,exception);
  if (status != MagickFalse)
    status=TransformImage(&dodge_image,(char *) NULL,"50%",exception);
  /*
    Color-dodge the original through the layer, then blend 20% original /
    80% dodge result via the "compose:args" artifact.
  */
  sketch_image=CloneImage(image,0,0,MagickTrue,exception);
  if (sketch_image == (Image *) NULL)
    {
      dodge_image=DestroyImage(dodge_image);
      return((Image *) NULL);
    }
  (void) CompositeImage(sketch_image,dodge_image,ColorDodgeCompositeOp,
    MagickTrue,0,0,exception);
  dodge_image=DestroyImage(dodge_image);
  blend_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blend_image == (Image *) NULL)
    {
      sketch_image=DestroyImage(sketch_image);
      return((Image *) NULL);
    }
  if (blend_image->alpha_trait != BlendPixelTrait)
    (void) SetImageAlpha(blend_image,TransparentAlpha,exception);
  (void) SetImageArtifact(blend_image,"compose:args","20x80");
  (void) CompositeImage(sketch_image,blend_image,BlendCompositeOp,MagickTrue,
    0,0,exception);
  blend_image=DestroyImage(blend_image);
  return(sketch_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S o l a r i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SolarizeImage() applies a special effect to the image, similar to the effect
% achieved in a photo darkroom by selectively exposing areas of photo
% sensitive paper to light. Threshold ranges from 0 to QuantumRange and is a
% measure of the extent of the solarization.
%
% The format of the SolarizeImage method is:
%
% MagickBooleanType SolarizeImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the extent of the solarization.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SolarizeImage() inverts every channel value that exceeds `threshold',
  mimicking darkroom solarization.  PseudoClass images are handled by
  remapping the colormap and syncing; DirectClass images update every pixel
  channel with the UpdatePixelTrait, row-parallel.  Returns MagickTrue on
  success.
*/
MagickExport MagickBooleanType SolarizeImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define SolarizeImageTag  "Solarize/Image"
  CacheView
    *image_view;
  MagickBooleanType
    status;
  MagickOffsetType
    progress;
  ssize_t
    y;
  /*
    Validate arguments; the exception asserts match the other exported
    effects in this file (they were previously missing here).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;
      /*
        Solarize colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((double) image->colormap[i].red > threshold)
          image->colormap[i].red=QuantumRange-image->colormap[i].red;
        if ((double) image->colormap[i].green > threshold)
          image->colormap[i].green=QuantumRange-image->colormap[i].green;
        if ((double) image->colormap[i].blue > threshold)
          image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
      }
      return(SyncImage(image,exception));
    }
  /*
    Solarize image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;
    Quantum
      *magick_restrict q;
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* invert values above the threshold */
        if ((double) q[i] > threshold)
          q[i]=QuantumRange-q[i];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SolarizeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e g a n o I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SteganoImage() hides a digital watermark within the image.  Recover
% the hidden watermark later to prove the authenticity of the image.
% Offset defines the start position within the image to hide the watermark.
%
% The format of the SteganoImage method is:
%
% Image *SteganoImage(const Image *image,Image *watermark,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o watermark: the watermark image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SteganoImage(const Image *image,const Image *watermark,
  ExceptionInfo *exception)
{
#define GetBit(alpha,i) ((((size_t) (alpha) >> (size_t) (i)) & 0x01) != 0)
#define SetBit(alpha,i,set) (Quantum) ((set) != 0 ? (size_t) (alpha) \
  | (one << (size_t) (i)) : (size_t) (alpha) & ~(one << (size_t) (i)))
#define SteganoImageTag "Stegano/Image"

  CacheView
    *stegano_view,
    *watermark_view;

  Image
    *stegano_image;

  int
    c;

  MagickBooleanType
    status;

  PixelInfo
    pixel;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    depth,
    one;

  ssize_t
    i,
    j,
    k,
    y;

  /*
    Initialize steganographic image attributes.  The watermark's intensity
    bits are spread across the low-order bits of the red, green, and blue
    channels of a clone of the input image, starting at pixel offset
    image->offset.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(watermark != (const Image *) NULL);
  assert(watermark->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  one=1UL;
  stegano_image=CloneImage(image,0,0,MagickTrue,exception);
  if (stegano_image == (Image *) NULL)
    return((Image *) NULL);
  stegano_image->depth=MAGICKCORE_QUANTUM_DEPTH;
  if (SetImageStorageClass(stegano_image,DirectClass,exception) == MagickFalse)
    {
      stegano_image=DestroyImage(stegano_image);
      return((Image *) NULL);
    }
  /*
    Hide watermark in low-order bits of image.  The outer loop walks the
    watermark intensity bits from most-significant (i) down, while j counts
    how many destination bit-planes have been filled; k is a linear pixel
    offset into the stegano image and c cycles over the R, G, B channels.
  */
  c=0;
  i=0;
  j=0;
  depth=stegano_image->depth;
  k=stegano_image->offset;
  status=MagickTrue;
  watermark_view=AcquireVirtualCacheView(watermark,exception);
  stegano_view=AcquireAuthenticCacheView(stegano_image,exception);
  for (i=(ssize_t) depth-1; (i >= 0) && (j < (ssize_t) depth); i--)
  {
    for (y=0; (y < (ssize_t) watermark->rows) && (j < (ssize_t) depth); y++)
    {
      for (x=0; (x < (ssize_t) watermark->columns) && (j < (ssize_t) depth); x++)
      {
        ssize_t
          offset;

        (void) GetOneCacheViewVirtualPixelInfo(watermark_view,x,y,&pixel,
          exception);
        offset=k/(ssize_t) stegano_image->columns;
        if (offset >= (ssize_t) stegano_image->rows)
          break;
        q=GetCacheViewAuthenticPixels(stegano_view,k % (ssize_t)
          stegano_image->columns,k/(ssize_t) stegano_image->columns,1,1,
          exception);
        if (q == (Quantum *) NULL)
          break;
        /*
          Store watermark intensity bit i in bit-plane j of the channel
          selected by c.
        */
        switch (c)
        {
          case 0:
          {
            SetPixelRed(stegano_image,SetBit(GetPixelRed(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 1:
          {
            SetPixelGreen(stegano_image,SetBit(GetPixelGreen(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
          case 2:
          {
            SetPixelBlue(stegano_image,SetBit(GetPixelBlue(stegano_image,q),j,
              GetBit(GetPixelInfoIntensity(stegano_image,&pixel),i)),q);
            break;
          }
        }
        if (SyncCacheViewAuthenticPixels(stegano_view,exception) == MagickFalse)
          break;
        c++;
        if (c == 3)
          c=0;
        k++;
        /*
          Wrap at the total pixel count.  This was columns*columns, which
          wraps at the wrong offset for non-square images.
        */
        if (k == (ssize_t) (stegano_image->columns*stegano_image->rows))
          k=0;
        if (k == stegano_image->offset)
          j++;
      }
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,SteganoImageTag,(MagickOffsetType)
          (depth-i),depth);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  stegano_view=DestroyCacheView(stegano_view);
  watermark_view=DestroyCacheView(watermark_view);
  if (status == MagickFalse)
    stegano_image=DestroyImage(stegano_image);
  return(stegano_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t e r e o A n a g l y p h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StereoAnaglyphImage() combines two images and produces a single image that
% is the composite of a left and right image of a stereo pair. Special
% red-green stereo glasses are required to view this effect.
%
% The format of the StereoAnaglyphImage method is:
%
% Image *StereoImage(const Image *left_image,const Image *right_image,
% ExceptionInfo *exception)
% Image *StereoAnaglyphImage(const Image *left_image,
% const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o left_image: the left image.
%
% o right_image: the right image.
%
% o exception: return any errors or warnings in this structure.
%
% o x_offset: amount, in pixels, by which the left image is offset to the
% right of the right image.
%
% o y_offset: amount, in pixels, by which the left image is offset to the
% bottom of the right image.
%
%
*/
MagickExport Image *StereoImage(const Image *left_image,
  const Image *right_image,ExceptionInfo *exception)
{
  /* Convenience wrapper: a stereo anaglyph with zero x/y offsets. */
  return(StereoAnaglyphImage(left_image,right_image,0,0,exception));
}
MagickExport Image *StereoAnaglyphImage(const Image *left_image,
  const Image *right_image,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
#define StereoImageTag "Stereo/Image"

  const Image
    *image;

  Image
    *stereo_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(left_image != (const Image *) NULL);
  assert(left_image->signature == MagickCoreSignature);
  if (left_image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      left_image->filename);
  assert(right_image != (const Image *) NULL);
  assert(right_image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  image=left_image;
  /* Both halves of the stereo pair must have identical dimensions. */
  if ((left_image->columns != right_image->columns) ||
      (left_image->rows != right_image->rows))
    ThrowImageException(ImageError,"LeftAndRightImageSizesDiffer");
  /*
    Initialize stereo image attributes.
  */
  stereo_image=CloneImage(left_image,left_image->columns,left_image->rows,
    MagickTrue,exception);
  if (stereo_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(stereo_image,DirectClass,exception) == MagickFalse)
    {
      stereo_image=DestroyImage(stereo_image);
      return((Image *) NULL);
    }
  (void) SetImageColorspace(stereo_image,sRGBColorspace,exception);
  /*
    Copy left image to red channel and right image to blue channel.
  */
  status=MagickTrue;
  for (y=0; y < (ssize_t) stereo_image->rows; y++)
  {
    const Quantum
      *magick_restrict p,
      *magick_restrict q;

    ssize_t
      x;

    Quantum
      *magick_restrict r;

    /*
      Virtual pixels tolerate the possibly negative coordinates produced by
      the left-image offsets.
    */
    p=GetVirtualPixels(left_image,-x_offset,y-y_offset,image->columns,1,
      exception);
    q=GetVirtualPixels(right_image,0,y,right_image->columns,1,exception);
    r=QueueAuthenticPixels(stereo_image,0,y,stereo_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL) ||
        (r == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) stereo_image->columns; x++)
    {
      /* Red from the left eye; green/blue from the right eye. */
      SetPixelRed(stereo_image,GetPixelRed(left_image,p),r);
      SetPixelGreen(stereo_image,GetPixelGreen(right_image,q),r);
      SetPixelBlue(stereo_image,GetPixelBlue(right_image,q),r);
      /* Alpha, when copyable, is the average of both eyes. */
      if ((GetPixelAlphaTraits(stereo_image) & CopyPixelTrait) != 0)
        SetPixelAlpha(stereo_image,(GetPixelAlpha(left_image,p)+
          GetPixelAlpha(right_image,q))/2,r);
      p+=GetPixelChannels(left_image);
      q+=GetPixelChannels(right_image);
      r+=GetPixelChannels(stereo_image);
    }
    if (SyncAuthenticPixels(stereo_image,exception) == MagickFalse)
      break;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,StereoImageTag,(MagickOffsetType) y,
          stereo_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  if (status == MagickFalse)
    stereo_image=DestroyImage(stereo_image);
  return(stereo_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S w i r l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SwirlImage() swirls the pixels about the center of the image, where
% degrees indicates the sweep of the arc through which each pixel is moved.
% You get a more dramatic effect as the degrees move from 1 to 360.
%
% The format of the SwirlImage method is:
%
% Image *SwirlImage(const Image *image,double degrees,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o degrees: Define the tightness of the swirling effect.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SwirlImage(const Image *image,double degrees,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define SwirlImageTag "Swirl/Image"

  CacheView
    *canvas_view,
    *interpolate_view,
    *swirl_view;

  double
    radius;

  Image
    *canvas_image,
    *swirl_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    center,
    scale;

  ssize_t
    y;

  /*
    Initialize swirl image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  swirl_image=CloneImage(canvas_image,0,0,MagickTrue,exception);
  if (swirl_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(swirl_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      swirl_image=DestroyImage(swirl_image);
      return((Image *) NULL);
    }
  /* A non-opaque background must blend, so enable the alpha channel. */
  if (swirl_image->background_color.alpha_trait != UndefinedPixelTrait)
    (void) SetImageAlphaChannel(swirl_image,OnAlphaChannel,exception);
  /*
    Compute scaling factor.  The shorter axis is stretched so the swirl
    operates in a circular coordinate system even for non-square images.
  */
  center.x=(double) canvas_image->columns/2.0;
  center.y=(double) canvas_image->rows/2.0;
  radius=MagickMax(center.x,center.y);
  scale.x=1.0;
  scale.y=1.0;
  if (canvas_image->columns > canvas_image->rows)
    scale.y=(double) canvas_image->columns/(double) canvas_image->rows;
  else
    if (canvas_image->columns < canvas_image->rows)
      scale.x=(double) canvas_image->rows/(double) canvas_image->columns;
  degrees=(double) DegreesToRadians(degrees);
  /*
    Swirl image.
  */
  status=MagickTrue;
  progress=0;
  canvas_view=AcquireVirtualCacheView(canvas_image,exception);
  interpolate_view=AcquireVirtualCacheView(image,exception);
  swirl_view=AcquireAuthenticCacheView(swirl_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,swirl_image,canvas_image->rows,1)
#endif
  for (y=0; y < (ssize_t) canvas_image->rows; y++)
  {
    double
      distance;

    PointInfo
      delta;

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(swirl_view,0,y,swirl_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    delta.y=scale.y*(double) (y-center.y);
    for (x=0; x < (ssize_t) canvas_image->columns; x++)
    {
      /*
        Determine if the pixel is within an ellipse.
      */
      delta.x=scale.x*(double) (x-center.x);
      distance=delta.x*delta.x+delta.y*delta.y;
      if (distance >= (radius*radius))
        {
          /* Outside the swirl radius: copy the source pixel unchanged. */
          ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(canvas_image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(canvas_image,i);
            PixelTrait traits = GetPixelChannelTraits(canvas_image,channel);
            PixelTrait swirl_traits = GetPixelChannelTraits(swirl_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (swirl_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(swirl_image,channel,p[i],q);
          }
        }
      else
        {
          double
            cosine,
            factor,
            sine;

          /*
            Swirl the pixel.  The rotation angle falls off quadratically
            with distance from the center, so the effect is strongest in
            the middle and fades toward the radius.
          */
          factor=1.0-sqrt((double) distance)/radius;
          sine=sin((double) (degrees*factor*factor));
          cosine=cos((double) (degrees*factor*factor));
          status=InterpolatePixelChannels(canvas_image,interpolate_view,
            swirl_image,method,((cosine*delta.x-sine*delta.y)/scale.x+center.x),
            (double) ((sine*delta.x+cosine*delta.y)/scale.y+center.y),q,
            exception);
          if (status == MagickFalse)
            break;
        }
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(swirl_image);
    }
    if (SyncCacheViewAuthenticPixels(swirl_view,exception) == MagickFalse)
      status=MagickFalse;
    if (canvas_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,SwirlImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  swirl_view=DestroyCacheView(swirl_view);
  interpolate_view=DestroyCacheView(interpolate_view);
  canvas_view=DestroyCacheView(canvas_view);
  canvas_image=DestroyImage(canvas_image);
  if (status == MagickFalse)
    swirl_image=DestroyImage(swirl_image);
  return(swirl_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TintImage() applies a color vector to each pixel in the image. The length
% of the vector is 0 for black and white and at its maximum for the midtones.
% The vector weighting function is f(x)=(1-(4.0*((x-0.5)*(x-0.5))))
%
% The format of the TintImage method is:
%
% Image *TintImage(const Image *image,const char *blend,
% const PixelInfo *tint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o blend: a geometry string (rho[,sigma[,xi[,psi]]]) of per-channel blend
%      percentages parsed by ParseGeometry(); NULL disables tinting.
%
% o tint: A color value used for tinting.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TintImage(const Image *image,const char *blend,
  const PixelInfo *tint,ExceptionInfo *exception)
{
#define TintImageTag "Tint/Image"

  CacheView
    *image_view,
    *tint_view;

  double
    intensity;

  GeometryInfo
    geometry_info;

  Image
    *tint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    color_vector;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Allocate tint image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  tint_image=CloneImage(image,0,0,MagickTrue,exception);
  if (tint_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(tint_image,DirectClass,exception) == MagickFalse)
    {
      tint_image=DestroyImage(tint_image);
      return((Image *) NULL);
    }
  /* A non-gray tint on a grayscale image forces a colorspace promotion. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsPixelInfoGray(tint) == MagickFalse))
    (void) SetImageColorspace(tint_image,sRGBColorspace,exception);
  /* No blend geometry: return the untinted clone. */
  if (blend == (const char *) NULL)
    return(tint_image);
  /*
    Determine RGB values of the color.  The blend geometry supplies the
    per-channel strengths: rho (red, and green/blue unless overridden),
    sigma (green), xi (blue), psi (alpha; black for CMYK), chi (alpha
    for CMYK).
  */
  GetPixelInfo(image,&color_vector);
  flags=ParseGeometry(blend,&geometry_info);
  color_vector.red=geometry_info.rho;
  color_vector.green=geometry_info.rho;
  color_vector.blue=geometry_info.rho;
  color_vector.alpha=(MagickRealType) OpaqueAlpha;
  if ((flags & SigmaValue) != 0)
    color_vector.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    color_vector.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    color_vector.alpha=geometry_info.psi;
  if (image->colorspace == CMYKColorspace)
    {
      color_vector.black=geometry_info.rho;
      if ((flags & PsiValue) != 0)
        color_vector.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        color_vector.alpha=geometry_info.chi;
    }
  /*
    Scale each channel strength by the tint color (as a percentage) and
    re-center around the tint's overall intensity.
  */
  intensity=(double) GetPixelInfoIntensity((const Image *) NULL,tint);
  color_vector.red=(double) (color_vector.red*tint->red/100.0-intensity);
  color_vector.green=(double) (color_vector.green*tint->green/100.0-intensity);
  color_vector.blue=(double) (color_vector.blue*tint->blue/100.0-intensity);
  color_vector.black=(double) (color_vector.black*tint->black/100.0-intensity);
  color_vector.alpha=(double) (color_vector.alpha*tint->alpha/100.0-intensity);
  /*
    Tint image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  tint_view=AcquireAuthenticCacheView(tint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,tint_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(tint_view,0,y,tint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      double
        weight;

      /*
        Apply the midtone weighting function f(x)=(1-(4.0*((x-0.5)^2))):
        zero at black and white, maximal at mid-gray.
      */
      GetPixelInfo(image,&pixel);
      weight=QuantumScale*GetPixelRed(image,p)-0.5;
      pixel.red=(MagickRealType) GetPixelRed(image,p)+color_vector.red*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelGreen(image,p)-0.5;
      pixel.green=(MagickRealType) GetPixelGreen(image,p)+color_vector.green*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlue(image,p)-0.5;
      pixel.blue=(MagickRealType) GetPixelBlue(image,p)+color_vector.blue*
        (1.0-(4.0*(weight*weight)));
      weight=QuantumScale*GetPixelBlack(image,p)-0.5;
      pixel.black=(MagickRealType) GetPixelBlack(image,p)+color_vector.black*
        (1.0-(4.0*(weight*weight)));
      /* Alpha is passed through untinted. */
      pixel.alpha=(MagickRealType) GetPixelAlpha(image,p);
      SetPixelViaPixelInfo(tint_image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(tint_image);
    }
    if (SyncCacheViewAuthenticPixels(tint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TintImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  tint_view=DestroyCacheView(tint_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    tint_image=DestroyImage(tint_image);
  return(tint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% V i g n e t t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% VignetteImage() softens the edges of the image in vignette style.
%
% The format of the VignetteImage method is:
%
% Image *VignetteImage(const Image *image,const double radius,
% const double sigma,const ssize_t x,const ssize_t y,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the pixel neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o x, y: Define the x and y ellipse offset.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *VignetteImage(const Image *image,const double radius,
  const double sigma,const ssize_t x,const ssize_t y,ExceptionInfo *exception)
{
  char
    ellipse[MagickPathExtent];

  DrawInfo
    *draw_info;

  Image
    *canvas,
    *blur_image,
    *oval_image,
    *vignette_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(canvas,DirectClass,exception) == MagickFalse)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  /* The mask is applied through the alpha channel, so enable blending. */
  canvas->alpha_trait=BlendPixelTrait;
  /*
    Draw a white ellipse on a black canvas; this becomes the vignette mask.
  */
  oval_image=CloneImage(canvas,canvas->columns,canvas->rows,MagickTrue,
    exception);
  if (oval_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  (void) QueryColorCompliance("#000000",AllCompliance,
    &oval_image->background_color,exception);
  (void) SetImageBackgroundColor(oval_image,exception);
  draw_info=CloneDrawInfo((const ImageInfo *) NULL,(const DrawInfo *) NULL);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#ffffff",AllCompliance,&draw_info->stroke,
    exception);
  /* Ellipse is centered on the image; x/y shrink its radii. */
  (void) FormatLocaleString(ellipse,MagickPathExtent,"ellipse %g,%g,%g,%g,"
    "0.0,360.0",image->columns/2.0,image->rows/2.0,image->columns/2.0-x,
    image->rows/2.0-y);
  draw_info->primitive=AcquireString(ellipse);
  (void) DrawImage(oval_image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  /*
    Blur the mask to soften the vignette edge, then composite its intensity
    into the canvas alpha channel and flatten onto the background.
  */
  blur_image=BlurImage(oval_image,radius,sigma,exception);
  oval_image=DestroyImage(oval_image);
  if (blur_image == (Image *) NULL)
    {
      canvas=DestroyImage(canvas);
      return((Image *) NULL);
    }
  blur_image->alpha_trait=UndefinedPixelTrait;
  (void) CompositeImage(canvas,blur_image,IntensityCompositeOp,MagickTrue,
    0,0,exception);
  blur_image=DestroyImage(blur_image);
  vignette_image=MergeImageLayers(canvas,FlattenLayer,exception);
  canvas=DestroyImage(canvas);
  if (vignette_image != (Image *) NULL)
    (void) TransformImageColorspace(vignette_image,image->colorspace,exception);
  return(vignette_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveImage() creates a "ripple" effect in the image by shifting the pixels
% vertically along a sine wave whose amplitude and wavelength is specified
% by the given parameters.
%
% The format of the WaveImage method is:
%
% Image *WaveImage(const Image *image,const double amplitude,
% const double wave_length,const PixelInterpolateMethod method,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o amplitude, wave_length: Define the amplitude and wave length of the
% sine wave.
%
%    o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *WaveImage(const Image *image,const double amplitude,
  const double wave_length,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define WaveImageTag "Wave/Image"

  CacheView
    *canvas_image_view,
    *wave_view;

  float
    *sine_map;

  Image
    *canvas_image,
    *wave_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize wave image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  canvas_image=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas_image == (Image *) NULL)
    return((Image *) NULL);
  if ((canvas_image->alpha_trait == UndefinedPixelTrait) &&
      (canvas_image->background_color.alpha != OpaqueAlpha))
    (void) SetImageAlpha(canvas_image,OpaqueAlpha,exception);
  /* Pad the height so the full sine displacement fits in the result. */
  wave_image=CloneImage(canvas_image,canvas_image->columns,(size_t)
    (canvas_image->rows+2.0*fabs(amplitude)),MagickTrue,exception);
  if (wave_image == (Image *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(wave_image,DirectClass,exception) == MagickFalse)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      return((Image *) NULL);
    }
  /*
    Allocate sine map.  Precompute the per-column vertical displacement so
    the row loop below only does a table lookup.
  */
  sine_map=(float *) AcquireQuantumMemory((size_t) wave_image->columns,
    sizeof(*sine_map));
  if (sine_map == (float *) NULL)
    {
      canvas_image=DestroyImage(canvas_image);
      wave_image=DestroyImage(wave_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (i=0; i < (ssize_t) wave_image->columns; i++)
    sine_map[i]=(float) fabs(amplitude)+amplitude*sin((double)
      ((2.0*MagickPI*i)*PerceptibleReciprocal(wave_length)));
  /*
    Wave image.
  */
  status=MagickTrue;
  progress=0;
  canvas_image_view=AcquireVirtualCacheView(canvas_image,exception);
  wave_view=AcquireAuthenticCacheView(wave_image,exception);
  /* Out-of-range samples near the edges resolve to the background color. */
  (void) SetCacheViewVirtualPixelMethod(canvas_image_view,
    BackgroundVirtualPixelMethod);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(canvas_image,wave_image,wave_image->rows,1)
#endif
  for (y=0; y < (ssize_t) wave_image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(canvas_image_view,0,y,canvas_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(wave_view,0,y,wave_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) wave_image->columns; x++)
    {
      /* Sample the source shifted vertically by the column's sine value. */
      status=InterpolatePixelChannels(canvas_image,canvas_image_view,
        wave_image,method,(double) x,(double) (y-sine_map[x]),q,exception);
      if (status == MagickFalse)
        break;
      p+=GetPixelChannels(canvas_image);
      q+=GetPixelChannels(wave_image);
    }
    if (SyncCacheViewAuthenticPixels(wave_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(canvas_image,WaveImageTag,progress,
          canvas_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  wave_view=DestroyCacheView(wave_view);
  canvas_image_view=DestroyCacheView(canvas_image_view);
  canvas_image=DestroyImage(canvas_image);
  sine_map=(float *) RelinquishMagickMemory(sine_map);
  if (status == MagickFalse)
    wave_image=DestroyImage(wave_image);
  return(wave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W a v e l e t D e n o i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WaveletDenoiseImage() removes noise from the image using a wavelet
% transform. The wavelet transform is a fast hierarchical scheme for
% processing an image using a set of consecutive low-pass and high-pass filters,
% followed by a decimation. This results in a decomposition into different
% scales which can be regarded as different “frequency bands”, determined by
% the mother wavelet. Adapted from dcraw.c by David Coffin.
%
% The format of the WaveletDenoiseImage method is:
%
% Image *WaveletDenoiseImage(const Image *image,const double threshold,
% const double softness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: set the threshold for smoothing.
%
% o softness: attenuate the smoothing threshold.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if !defined(magick_restrict)
# define magick_restrict  /* normally supplied by the MagickCore config */
#endif

/*
  One level of the a-trous ("with holes") hat filter used by the wavelet
  denoiser:  kernel[i] = 0.25*(2*s[i] + s[i-scale] + s[i+scale]) over the
  sequence s[i] = pixels[i*stride], with indices mirrored at both ends.
  `extent' is the number of samples; `kernel' receives `extent' floats.
*/
static inline void HatTransform(const float *magick_restrict pixels,
  const size_t stride,const size_t extent,const size_t scale,float *kernel)
{
  ssize_t
    n;

  /*
    Leading edge: the left neighbor s[n-scale] is out of range, so mirror
    it to s[scale-n].
  */
  for (n=0; n < (ssize_t) scale; n++)
    kernel[n]=0.25f*(pixels[stride*(size_t) n]+pixels[stride*(size_t) n]+
      pixels[stride*(size_t) ((ssize_t) scale-n)]+
      pixels[stride*(scale+(size_t) n)]);
  /*
    Interior: both neighbors are in range.
  */
  for ( ; n < (ssize_t) (extent-scale); n++)
    kernel[n]=0.25f*(2.0f*pixels[stride*(size_t) n]+
      pixels[stride*((size_t) n-scale)]+pixels[stride*((size_t) n+scale)]);
  /*
    Trailing edge: the right neighbor s[n+scale] is out of range, so mirror
    it about the last sample to s[2*(extent-1)-(n+scale)].
  */
  for ( ; n < (ssize_t) extent; n++)
    kernel[n]=0.25f*(pixels[stride*(size_t) n]+pixels[stride*(size_t) n]+
      pixels[stride*((size_t) n-scale)]+
      pixels[stride*(2*(extent-1)-((size_t) n+scale))]);
}
MagickExport Image *WaveletDenoiseImage(const Image *image,
const double threshold,const double softness,ExceptionInfo *exception)
{
CacheView
*image_view,
*noise_view;
float
*kernel,
*pixels;
Image
*noise_image;
MagickBooleanType
status;
MagickSizeType
number_pixels;
MemoryInfo
*pixels_info;
ssize_t
channel;
static const float
noise_levels[] = { 0.8002f, 0.2735f, 0.1202f, 0.0585f, 0.0291f, 0.0152f,
0.0080f, 0.0044f };
/*
Initialize noise image attributes.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
noise_image=AccelerateWaveletDenoiseImage(image,threshold,exception);
if (noise_image != (Image *) NULL)
return(noise_image);
#endif
noise_image=CloneImage(image,0,0,MagickTrue,exception);
if (noise_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(noise_image,DirectClass,exception) == MagickFalse)
{
noise_image=DestroyImage(noise_image);
return((Image *) NULL);
}
if (AcquireMagickResource(WidthResource,4*image->columns) == MagickFalse)
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
pixels_info=AcquireVirtualMemory(3*image->columns,image->rows*
sizeof(*pixels));
kernel=(float *) AcquireQuantumMemory(MagickMax(image->rows,image->columns)+1,
GetOpenMPMaximumThreads()*sizeof(*kernel));
if ((pixels_info == (MemoryInfo *) NULL) || (kernel == (float *) NULL))
{
if (kernel != (float *) NULL)
kernel=(float *) RelinquishMagickMemory(kernel);
if (pixels_info != (MemoryInfo *) NULL)
pixels_info=RelinquishVirtualMemory(pixels_info);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=(float *) GetVirtualMemoryBlob(pixels_info);
status=MagickTrue;
number_pixels=(MagickSizeType) image->columns*image->rows;
image_view=AcquireAuthenticCacheView(image,exception);
noise_view=AcquireAuthenticCacheView(noise_image,exception);
for (channel=0; channel < (ssize_t) GetPixelChannels(image); channel++)
{
ssize_t
i;
size_t
high_pass,
low_pass;
ssize_t
level,
y;
PixelChannel
pixel_channel;
PixelTrait
traits;
if (status == MagickFalse)
continue;
traits=GetPixelChannelTraits(image,(PixelChannel) channel);
if (traits == UndefinedPixelTrait)
continue;
pixel_channel=GetPixelChannelChannel(image,channel);
if ((pixel_channel != RedPixelChannel) &&
(pixel_channel != GreenPixelChannel) &&
(pixel_channel != BluePixelChannel))
continue;
/*
Copy channel from image to wavelet pixel array.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
const Quantum
*magick_restrict p;
ssize_t
x;
p=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
pixels[i++]=(float) p[channel];
p+=GetPixelChannels(image);
}
}
/*
Low pass filter outputs are called approximation kernel & high pass
filters are referred to as detail kernel. The detail kernel
have high values in the noisy parts of the signal.
*/
high_pass=0;
for (level=0; level < 5; level++)
{
double
magnitude;
ssize_t
x;
low_pass=(size_t) (number_pixels*((level & 0x01)+1));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
float
*magick_restrict p,
*magick_restrict q;
ssize_t
c;
p=kernel+id*image->columns;
q=pixels+y*image->columns;
HatTransform(q+high_pass,1,image->columns,((size_t) 1UL << level),p);
q+=low_pass;
for (c=0; c < (ssize_t) image->columns; c++)
*q++=(*p++);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,1) \
magick_number_threads(image,image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
float
*magick_restrict p,
*magick_restrict q;
ssize_t
r;
p=kernel+id*image->rows;
q=pixels+x+low_pass;
HatTransform(q,image->columns,image->rows,((size_t) 1UL << level),p);
for (r=0; r < (ssize_t) image->rows; r++)
{
*q=(*p++);
q+=image->columns;
}
}
/*
To threshold, each coefficient is compared to a threshold value and
attenuated / shrunk by some factor.
*/
magnitude=threshold*noise_levels[level];
for (i=0; i < (ssize_t) number_pixels; ++i)
{
pixels[high_pass+i]-=pixels[low_pass+i];
if (pixels[high_pass+i] < -magnitude)
pixels[high_pass+i]+=magnitude-softness*magnitude;
else
if (pixels[high_pass+i] > magnitude)
pixels[high_pass+i]-=magnitude-softness*magnitude;
else
pixels[high_pass+i]*=softness;
if (high_pass != 0)
pixels[i]+=pixels[high_pass+i];
}
high_pass=low_pass;
}
/*
Reconstruct image from the thresholded wavelet kernel.
*/
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
Quantum
*magick_restrict q;
ssize_t
x;
ssize_t
offset;
q=GetCacheViewAuthenticPixels(noise_view,0,y,noise_image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
break;
}
offset=GetPixelChannelOffset(noise_image,pixel_channel);
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
pixel;
pixel=(MagickRealType) pixels[i]+pixels[low_pass+i];
q[offset]=ClampToQuantum(pixel);
i++;
q+=GetPixelChannels(noise_image);
}
sync=SyncCacheViewAuthenticPixels(noise_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,AddNoiseImageTag,(MagickOffsetType)
channel,GetPixelChannels(image));
if (proceed == MagickFalse)
status=MagickFalse;
}
}
noise_view=DestroyCacheView(noise_view);
image_view=DestroyCacheView(image_view);
kernel=(float *) RelinquishMagickMemory(kernel);
pixels_info=RelinquishVirtualMemory(pixels_info);
if (status == MagickFalse)
noise_image=DestroyImage(noise_image);
return(noise_image);
}
|
enhance.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% EEEEE N N H H AAA N N CCCC EEEEE %
% E NN N H H A A NN N C E %
% EEE N N N HHHHH AAAAA N N N C EEE %
% E N NN H H A A N NN C E %
% EEEEE N N H H A A N N CCCC EEEEE %
% %
% %
% MagickCore Image Enhancement Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoGammaImage() extracts the 'mean' from the image and adjusts the image
% to try to set its gamma appropriately.
%
% The format of the AutoGammaImage method is:
%
% MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
  ExceptionInfo *exception)
{
  double
    gamma,
    log_mean,
    mean,
    sans;

  MagickStatusType
    status;

  register ssize_t
    i;

  /*
    Choose a gamma that maps the image mean to mid-gray: solving
    0.5 = mean^(1/gamma) gives gamma = log(mean)/log(0.5).
  */
  log_mean=log(0.5);
  if (image->channel_mask == DefaultChannels)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageMean(image,&mean,&sans,exception);
      gamma=log(mean*QuantumScale)/log_mean;
      return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
    }
  /*
    Auto-gamma each channel separately.
  */
  status=MagickTrue;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ChannelType
      channel_mask;

    PixelChannel channel=GetPixelChannelChannel(image,i);
    PixelTrait traits=GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    /*
      Temporarily restrict updates to this one channel; 1UL avoids a signed
      shift if the channel index is large.
    */
    channel_mask=SetImageChannelMask(image,(ChannelType) (1UL << i));
    /* Accumulate status rather than overwriting earlier failures. */
    status&=GetImageMean(image,&mean,&sans,exception);
    gamma=log(mean*QuantumScale)/log_mean;
    status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
    (void) SetImageChannelMask(image,channel_mask);
    if (status == MagickFalse)
      break;
  }
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoLevelImage() adjusts the levels of a particular image channel by
% scaling the minimum and maximum values to the full quantum range.
%
% The format of the AutoLevelImage method is:
%
% MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-level
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /*
    Stretch each channel's min/max to the full quantum range: a min/max
    stretch with 0% black point, 0% white point, and unity gamma.
  */
  status=MinMaxStretchImage(image,0.0,0.0,1.0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B r i g h t n e s s C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BrightnessContrastImage() changes the brightness and/or contrast of an
% image. It converts the brightness and contrast parameters into slope and
% intercept and calls a polynomial function to apply to the image.
%
% The format of the BrightnessContrastImage method is:
%
% MagickBooleanType BrightnessContrastImage(Image *image,
% const double brightness,const double contrast,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o brightness: the brightness percent (-100 .. 100).
%
% o contrast: the contrast percent (-100 .. 100).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast,ExceptionInfo *exception)
{
#define BrightnessContastImageTag  "BrightnessContast/Image"

  double
    coefficients[2],
    intercept,
    slope;

  MagickBooleanType
    status;

  /*
    Convert the brightness/contrast percentages (-100..100) into the slope
    and intercept of a linear polynomial and hand the work to
    FunctionImage().
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Contrast maps through tan(); clamp negative slopes to zero. */
  slope=tan((double) (MagickPI*(contrast/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClutImage() replaces each color value in the given image, by using it as an
% index to lookup a replacement color value in a Color Look UP Table in the
% form of an image. The values are extracted along a diagonal of the CLUT
% image so either a horizontal or vertical gradient image can be used.
%
% Typically this is used to either re-color a gray-scale image according to a
% color gradient in the CLUT image, or to perform a freeform histogram
% (level) adjustment according to the (typically gray-scale) gradient in the
% CLUT image.
%
% When the 'channel' mask includes the matte/alpha transparency channel but
% one image has no such channel it is assumed that that image is a simple
% gray-scale image that will affect the alpha channel values, either for
% gray-scale coloring (with transparent or semi-transparent colors), or
% a histogram adjustment of existing alpha channel values. If both images
% have matte channels, direct and normal indexing is applied, which is rarely
% used.
%
% The format of the ClutImage method is:
%
% MagickBooleanType ClutImage(Image *image,Image *clut_image,
% const PixelInterpolateMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o clut_image: the color lookup table image for replacement color values.
%
% o method: the pixel interpolation method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag  "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *clut_map;

  register ssize_t
    i;

  ssize_t adjust,
    y;

  /*
    Remap every pixel of 'image' through a color lookup table built by
    sampling 'clut_image' along its diagonal (MaxMap+1 entries).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* A color CLUT applied to a gray image promotes the image to sRGB. */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
  if (clut_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
  */
  status=MagickTrue;
  progress=0;
  /*
    Integer interpolation samples cell centers; other methods sample up to
    columns-1/rows-1, hence the one-pixel adjustment.
  */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ? 0 : 1);
  clut_view=AcquireVirtualCacheView(clut_image,exception);
  /* Build the lookup table by interpolating along the CLUT's diagonal. */
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetPixelInfo(clut_image,clut_map+i);
    (void) InterpolatePixelInfo(clut_image,clut_view,method,
      (double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
      (clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelTrait
        traits;

      /* Honor the pixel write mask: leave masked pixels untouched. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      /* Remap each channel flagged for update through the lookup table. */
      traits=GetPixelChannelTraits(image,RedPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.red))].red;
      traits=GetPixelChannelTraits(image,GreenPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.green))].green;
      traits=GetPixelChannelTraits(image,BluePixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.blue))].blue;
      traits=GetPixelChannelTraits(image,BlackPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.black))].black;
      traits=GetPixelChannelTraits(image,AlphaPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.alpha))].alpha;
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClutImage)
#endif
        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
  /* If the CLUT carries alpha and alpha is updatable, activate it here. */
  if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
      ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o l o r D e c i s i o n L i s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ColorDecisionListImage() accepts a lightweight Color Correction Collection
% (CCC) file which solely contains one or more color corrections and applies
% the correction to the image. Here is a sample CCC file:
%
% <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
% <ColorCorrection id="cc03345">
% <SOPNode>
% <Slope> 0.9 1.2 0.5 </Slope>
% <Offset> 0.4 -0.5 0.6 </Offset>
% <Power> 1.0 0.8 1.5 </Power>
% </SOPNode>
% <SATNode>
% <Saturation> 0.85 </Saturation>
% </SATNode>
% </ColorCorrection>
% </ColorCorrectionCollection>
%
% which includes the slope, offset, and power for each of the RGB channels
% as well as the saturation.
%
% The format of the ColorDecisionListImage method is:
%
% MagickBooleanType ColorDecisionListImage(Image *image,
% const char *color_correction_collection,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color_correction_collection: the color correction collection in XML.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ColorDecisionListImage(Image *image,
  const char *color_correction_collection,ExceptionInfo *exception)
{
#define ColorDecisionListCorrectImageTag  "ColorDecisionList/Image"

  /* One slope/offset/power (SOP) triple, as defined by the ASC CDL. */
  typedef struct _Correction
  {
    double
      slope,
      offset,
      power;
  } Correction;

  /* Per-channel SOP corrections plus a single saturation value. */
  typedef struct _ColorCorrection
  {
    Correction
      red,
      green,
      blue;

    double
      saturation;
  } ColorCorrection;

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  ColorCorrection
    color_correction;

  const char
    *content,
    *p;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *cdl_map;

  register ssize_t
    i;

  ssize_t
    y;

  XMLTreeInfo
    *cc,
    *ccc,
    *sat,
    *sop;

  /*
    Allocate and initialize cdl maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (color_correction_collection == (const char *) NULL)
    return(MagickFalse);
  /* Parse the CCC XML; bail out quietly if it is malformed. */
  ccc=NewXMLTree((const char *) color_correction_collection,exception);
  if (ccc == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  cc=GetXMLTreeChild(ccc,"ColorCorrection");
  if (cc == (XMLTreeInfo *) NULL)
    {
      ccc=DestroyXMLTree(ccc);
      return(MagickFalse);
    }
  /* Identity SOP defaults; elements absent from the CCC keep these. */
  color_correction.red.slope=1.0;
  color_correction.red.offset=0.0;
  color_correction.red.power=1.0;
  color_correction.green.slope=1.0;
  color_correction.green.offset=0.0;
  color_correction.green.power=1.0;
  color_correction.blue.slope=1.0;
  color_correction.blue.offset=0.0;
  color_correction.blue.power=1.0;
  color_correction.saturation=0.0;
  sop=GetXMLTreeChild(cc,"SOPNode");
  if (sop != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *offset,
        *power,
        *slope;

      /* <Slope> holds up to three values, in R G B order. */
      slope=GetXMLTreeChild(sop,"Slope");
      if (slope != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(slope);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.slope=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.slope=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      /* <Offset> holds up to three values, in R G B order. */
      offset=GetXMLTreeChild(sop,"Offset");
      if (offset != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(offset);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.offset=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
      /* <Power> holds up to three values, in R G B order. */
      power=GetXMLTreeChild(sop,"Power");
      if (power != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(power);
          p=(const char *) content;
          for (i=0; (*p != '\0') && (i < 3); i++)
          {
            GetNextToken(p,&p,MagickPathExtent,token);
            if (*token == ',')
              GetNextToken(p,&p,MagickPathExtent,token);
            switch (i)
            {
              case 0:
              {
                color_correction.red.power=StringToDouble(token,(char **) NULL);
                break;
              }
              case 1:
              {
                color_correction.green.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
              case 2:
              {
                color_correction.blue.power=StringToDouble(token,
                  (char **) NULL);
                break;
              }
            }
          }
        }
    }
  /* <SATNode><Saturation> supplies the single saturation scalar. */
  sat=GetXMLTreeChild(cc,"SATNode");
  if (sat != (XMLTreeInfo *) NULL)
    {
      XMLTreeInfo
        *saturation;

      saturation=GetXMLTreeChild(sat,"Saturation");
      if (saturation != (XMLTreeInfo *) NULL)
        {
          content=GetXMLTreeContent(saturation);
          p=(const char *) content;
          GetNextToken(p,&p,MagickPathExtent,token);
          color_correction.saturation=StringToDouble(token,(char **) NULL);
        }
    }
  ccc=DestroyXMLTree(ccc);
  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  Color Correction Collection:");
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.slope: %g",color_correction.red.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.offset: %g",color_correction.red.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.red.power: %g",color_correction.red.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.slope: %g",color_correction.green.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.offset: %g",color_correction.green.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.green.power: %g",color_correction.green.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.slope: %g",color_correction.blue.slope);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.offset: %g",color_correction.blue.offset);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.blue.power: %g",color_correction.blue.power);
      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  color_correction.saturation: %g",color_correction.saturation);
    }
  /* Precompute per-channel SOP transfer maps: (slope*v+offset)^power. */
  cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map));
  if (cdl_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    cdl_map[i].red=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.red.slope*i/MaxMap+
      color_correction.red.offset,color_correction.red.power))));
    cdl_map[i].green=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.green.slope*i/MaxMap+
      color_correction.green.offset,color_correction.green.power))));
    cdl_map[i].blue=(double) ScaleMapToQuantum((double)
      (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+
      color_correction.blue.offset,color_correction.blue.power))));
  }
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Apply transfer function to colormap.
      */
      double
        luma;

      /*
        NOTE(review): unlike the pixel loop below, the saturation term here
        is not parenthesized, so luma cancels and the expression reduces to
        saturation*cdl_map[...]; confirm this asymmetry is intended.
      */
      luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+
        0.07217f*image->colormap[i].blue;
      image->colormap[i].red=luma+color_correction.saturation*cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma;
      image->colormap[i].green=luma+color_correction.saturation*cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma;
      image->colormap[i].blue=luma+color_correction.saturation*cdl_map[
        ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma;
    }
  /*
    Apply transfer function to image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      luma;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* out = luma + saturation*(SOP(v) - luma), per channel. */
      luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+
        0.07217f*GetPixelBlue(image,q);
      SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q);
      SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q);
      SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation*
        (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ColorDecisionListImageChannel)
#endif
        proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
          progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastImage() enhances the intensity differences between the lighter and
% darker elements of the image. Set sharpen to a MagickTrue to increase the
% image contrast otherwise the contrast is reduced.
%
% The format of the ContrastImage method is:
%
% MagickBooleanType ContrastImage(Image *image,
% const MagickBooleanType sharpen,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o exception: return any errors or warnings in this structure.
%
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    brightness,
    delta,
    hue,
    saturation;

  /*
    Enhance contrast: dark color become darker, light color become lighter.
    The pixel is moved toward a sinusoidal S-curve in brightness; a positive
    sign pushes toward the curve (more contrast), a negative sign away.
  */
  assert(red != (double *) NULL);
  assert(green != (double *) NULL);
  assert(blue != (double *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  delta=0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-brightness;
  brightness+=0.5*sign*delta;
  if (brightness > 1.0)
    brightness=1.0;
  if (brightness < 0.0)
    brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag  "Contrast/Image"

  CacheView
    *image_view;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Increase (sharpen != MagickFalse) or decrease image contrast by running
    every pixel through the Contrast() sigmoidal brightness adjustment.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          blue,
          green,
          red;

        red=(double) image->colormap[i].red;
        green=(double) image->colormap[i].green;
        blue=(double) image->colormap[i].blue;
        Contrast(sign,&red,&green,&blue);
        image->colormap[i].red=(MagickRealType) red;
        image->colormap[i].green=(MagickRealType) green;
        image->colormap[i].blue=(MagickRealType) blue;
      }
    }
  /*
    Contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      blue,
      green,
      red;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastImage)
#endif
        proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o n t r a s t S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ContrastStretchImage() is a simple image enhancement technique that attempts
% to improve the contrast in an image by 'stretching' the range of intensity
% values it contains to span a desired range of values. It differs from the
% more sophisticated histogram equalization in that it can only apply a
% linear scaling function to the image pixel values. As a result the
% 'enhancement' is less harsh.
%
% The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o levels: Specify the levels where the black and white points have the
% range of 0 to number-of-pixels (e.g. 1%, 10x90%, etc.).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color)  ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag  "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    *black,
    *histogram,
    *stretch_map,
    *white;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
    black_point/white_point are expressed as pixel counts (0..columns*rows),
    not percentages; callers convert percentages before calling.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(GetPixelChannels(image),sizeof(*black));
  white=(double *) AcquireQuantumMemory(GetPixelChannels(image),sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,
    GetPixelChannels(image)*sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /* Release whichever buffers were successfully acquired. */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      /*
        With the default channel mask all channels are binned by the pixel's
        intensity; otherwise each channel is binned by its own value.
      */
      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    /* Scan up from 0 until black_point pixels have been accumulated. */
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    /* Scan down from MaxMap until the white-point count is reached. */
    intensity=0.0;
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping:
    values below black map to 0, above white to QuantumRange, and the
    interval in between is scaled linearly.
  */
  (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;

      /* PerceptibleReciprocal guards against white == black. */
      gamma=PerceptibleReciprocal(white[i]-black[i]);
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
            (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      /* Honor the pixel write mask: leave masked pixels untouched. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastStretchImage)
#endif
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n h a n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EnhanceImage() applies a digital filter that improves the quality of a
% noisy image.
%
% The format of the EnhanceImage method is:
%
% Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag "Enhance/Image"
/*
  EnhancePixel() folds the neighbor pixel at r into the running `aggregate'
  with the given weight, but only when its per-channel biased squared color
  distance from the window's center pixel is below the similarity threshold
  (0.069); r is then advanced to the next pixel of the row.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.  The result is a full clone of the
    input, promoted to DirectClass so every pixel can be written.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image: each output pixel is a threshold-gated weighted average of
    its 5x5 neighborhood.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /*
      Read a 5-row window of virtual pixels with a 2-pixel margin on every
      side so the 5x5 kernel is defined at the image edges.
    */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      center is the channel offset of the current (x,y) pixel inside the
      window: 2 rows down and 2 columns right of p; p advances with x so
      p+center always tracks the pixel being enhanced.
    */
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelInfo
        aggregate;

      register const Quantum
        *magick_restrict r;

      /*
        Masked pixels receive the background color unchanged.  (Note:
        "Backgound" is the API's own spelling.)
      */
      if (GetPixelWriteMask(image,p) == 0)
        {
          SetPixelBackgoundColor(enhance_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(enhance_image);
          continue;
        }
      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      GetPixelInfoPixel(image,p+center,&pixel);
      /*
        Apply the 5x5 weight kernel row by row:
          5   8  10   8  5
          8  20  40  20  8
         10  40  80  40 10
          8  20  40  20  8
          5   8  10   8  5
        EnhancePixel() skips neighbors too dissimilar from the center.
      */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
        EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      /*
        Weighted mean of the accepted neighbors; the +total_weight/2.0 term
        rounds rather than truncates.  The center pixel always passes the
        threshold (distance 0), so total_weight is never zero here.
      */
      pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
      pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
      pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
      pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
      pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
      SetPixelViaPixelInfo(image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EnhanceImage)
#endif
        proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E q u a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EqualizeImage() applies a histogram equalization to the image.
%
% The format of the EqualizeImage method is:
%
% MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType EqualizeImage(Image *image,
  ExceptionInfo *exception)
{
#define EqualizeImageTag "Equalize/Image"

  CacheView
    *image_view;

  double
    black[CompositePixelChannel+1],
    *equalize_map,
    *histogram,
    *map,
    white[CompositePixelChannel+1];

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize histogram arrays: one row per map level, one
    column per pixel channel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateEqualizeImage(image,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL,
    GetPixelChannels(image)*sizeof(*equalize_map));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  map=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*map));
  if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) ||
      (map == (double *) NULL))
    {
      if (map != (double *) NULL)
        map=(double *) RelinquishMagickMemory(map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (equalize_map != (double *) NULL)
        equalize_map=(double *) RelinquishMagickMemory(equalize_map);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          intensity;

        /*
          When channels are synced, histogram the overall pixel intensity
          rather than the individual channel sample.
        */
        intensity=p[i];
        if ((image->channel_mask & SyncChannels) != 0)
          intensity=GetPixelIntensity(image,p);
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(intensity)+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Integrate the histogram to get the equalization map (the per-channel
    cumulative distribution function).
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      map[GetPixelChannels(image)*j+i]=intensity;
    }
  }
  (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*equalize_map));
  /*
    Zero the entire black[] and white[] arrays.  The previous code used
    sizeof(*black)/sizeof(*white) -- i.e. sizeof(double) -- which cleared
    only the first element and left the remaining channel entries
    uninitialized.
  */
  (void) ResetMagickMemory(black,0,sizeof(black));
  (void) ResetMagickMemory(white,0,sizeof(white));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    /*
      black/white hold the CDF extremes per channel; a flat channel
      (black == white) is left unmapped to avoid dividing by zero.
    */
    black[i]=map[i];
    white[i]=map[GetPixelChannels(image)*MaxMap+i];
    if (black[i] != white[i])
      for (j=0; j <= (ssize_t) MaxMap; j++)
        equalize_map[GetPixelChannels(image)*j+i]=(double)
          ScaleMapToQuantum((double) ((MaxMap*(map[
          GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i])));
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  map=(double *) RelinquishMagickMemory(map);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Equalize colormap.  NOTE(review): these lookups index black[] and
        equalize_map[] with the PixelChannel value, while the tables above
        are built per channel *offset*; the two coincide for common channel
        layouts -- confirm for exotic layouts.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel=GetPixelChannelChannel(image,RedPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].red=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+
                channel];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel=GetPixelChannelChannel(image,
              GreenPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].green=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+
                channel];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel=GetPixelChannelChannel(image,BluePixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].blue=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+
                channel];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            PixelChannel channel=GetPixelChannelChannel(image,
              AlphaPixelChannel);
            if (black[channel] != white[channel])
              image->colormap[j].alpha=equalize_map[GetPixelChannels(image)*
                ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+
                channel];
          }
      }
    }
  /*
    Equalize image: push every updatable channel sample through the map.
  */
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      /*
        Masked pixels are skipped unchanged.
      */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j]))
          continue;
        q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EqualizeImage)
#endif
        proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  equalize_map=(double *) RelinquishMagickMemory(equalize_map);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G a m m a I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GammaImage() gamma-corrects a particular image channel. The same
% image viewed on different devices will have perceptual differences in the
% way the image's intensities are represented on the screen. Specify
% individual gamma levels for the red, green, and blue channels, or adjust
% all three with the gamma parameter. Values typically range from 0.8 to 2.3.
%
% You can also reduce the influence of a particular channel with a gamma
% value of 0.
%
% The format of the GammaImage method is:
%
% MagickBooleanType GammaImage(Image *image,const double gamma,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o gamma: the image gamma; values typically range from 0.8 to 2.3.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double gamma_pow(const double value,const double gamma)
{
return(value < 0.0 ? value : pow(value,gamma));
}
MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaCorrectImageTag "GammaCorrect/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    A gamma of 1.0 is the identity correction: nothing to do.
  */
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Build the lookup table gamma_map[i]=MaxMap*(i/MaxMap)^(1/gamma).  For
    gamma == 0.0 the table is left all zeros, which forces the selected
    channels to black in the non-HDRI paths below.
  */
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,1.0/gamma)));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
#else
      /*
        HDRI: apply the power function directly (no lookup table) so
        out-of-range samples survive; gamma_pow() passes negative values
        through unchanged.  NOTE(review): gamma == 0.0 reaches 1.0/gamma
        here (division by zero, infinite exponent) with no guard -- confirm
        this is intended for the HDRI build.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].red,1.0/gamma);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].green,1.0/gamma);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].blue,1.0/gamma);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].alpha,1.0/gamma);
#endif
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      /*
        Masked pixels are skipped unchanged.
      */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
#if !defined(MAGICKCORE_HDRI_SUPPORT)
        q[j]=gamma_map[ScaleQuantumToMap(q[j])];
#else
        q[j]=QuantumRange*gamma_pow(QuantumScale*q[j],1.0/gamma);
#endif
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GammaImage)
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /*
    Track the cumulative gamma on the image; a stored gamma of 0.0 is left
    untouched.
  */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G r a y s c a l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GrayscaleImage() converts the image to grayscale.
%
% The format of the GrayscaleImage method is:
%
% MagickBooleanType GrayscaleImage(Image *image,
% const PixelIntensityMethod method ,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the pixel intensity method.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Colormapped images: first materialize the colormap into the pixel
    buffer, then promote to DirectClass so per-pixel writes stick.
  */
  if (image->storage_class == PseudoClass)
    {
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Fast path: OpenCL-accelerated conversion when available.
  */
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image: compute an intensity per pixel using the requested
    method and write it back as the gray channel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;

      /*
        Masked pixels are skipped unchanged.
      */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          /* Arithmetic mean of the three channels. */
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          /* HSB brightness: the maximum channel. */
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          /* HSL lightness: midpoint of min and max channels. */
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* Mean of the squared channels. */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /*
            Rec.601 luma operates on gamma-encoded values; linear RGB is
            encoded first.
          */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /*
            Rec.601 luminance operates on linear values; sRGB is decoded
            first.
          */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          /* Rec.709 luma on gamma-encoded values; this is the default. */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          /* Rec.709 luminance on linear values; sRGB is decoded first. */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* Root mean square of the channels. */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GrayscaleImage)
#endif
        proceed=SetImageProgress(image,GrayscaleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Record the method used and switch the image to the GRAY colorspace.
  */
  image->intensity=method;
  image->type=GrayscaleType;
  return(SetImageColorspace(image,GRAYColorspace,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% H a l d C l u t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% HaldClutImage() applies a Hald color lookup table to the image. A Hald
% color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
% Create it with the HALD coder. You can apply any color transformation to
% the Hald image and then use this method to apply the transform to the
% image.
%
% The format of the HaldClutImage method is:
%
% MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image, which is replaced by indexed CLUT values
%
% o hald_image: the color lookup table image for replacement color values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag "Clut/Image"

  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  /*
    Derive the CLUT geometry from the Hald image dimensions: find the
    smallest `level' whose cube reaches the image side, then square it to
    get the number of CLUT entries per axis; cube_size is one full z-slice.
  */
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /*
        Map the pixel's R,G,B to fractional coordinates inside the CLUT
        cube; offset is the linear index of the base sample.  point.x keeps
        its fractional part -- the x interpolation is delegated to the 2-D
        InterpolatePixelInfo() lookups below.
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      /*
        Trilinear sampling: blend the two CLUT rows (y, y+1) of the lower
        z-slice into pixel3, the same rows of the upper z-slice into
        pixel4, then blend pixel3/pixel4 by point.z.
      */
      pixel1=zero;
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      pixel2=zero;
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      pixel3=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel3);
      offset+=cube_size;
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel4);
      pixel=zero;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        point.z,&pixel);
      /*
        Write back only the channels flagged for update; black only for
        CMYK images, alpha only when the image has an alpha trait.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HaldClutImage)
#endif
        proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImage() adjusts the levels of a particular image channel by
% scaling the colors falling between specified white and black points to
% the full available quantum range.
%
% The parameters provided represent the black, and white points. The black
% point specifies the darkest color in the image. Colors darker than the
% black point are set to zero. White point specifies the lightest color in
% the image. Colors brighter than the white point are set to the maximum
% quantum value.
%
% If a '!' flag is given, map black and white colors to the given levels
% rather than mapping those levels to black and white. See
% LevelizeImage() below.
%
% Gamma specifies a gamma correction to apply to the image.
%
% The format of the LevelImage method is:
%
% MagickBooleanType LevelImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Linearly stretch a pixel sample from [black_point,white_point] onto the
  full quantum range, applying a gamma adjustment via gamma_pow().  A
  degenerate range (white within MagickEpsilon of black) returns the sample
  unchanged to avoid dividing by zero.
*/
static inline double LevelPixel(const double black_point,
  const double white_point,const double gamma,const double pixel)
{
  double
    scale;

  if (fabs(white_point-black_point) < MagickEpsilon)
    return(pixel);
  scale=1.0/(white_point-black_point);
  return(QuantumRange*gamma_pow(scale*(pixel-black_point),1.0/gamma));
}
MagickExport MagickBooleanType LevelImage(Image *image,const double black_point,
  const double white_point,const double gamma,ExceptionInfo *exception)
{
#define LevelImageTag "Level/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Colormapped images: level each updatable colormap channel in place.
  */
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].red));
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].green));
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].blue));
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point,
          white_point,gamma,image->colormap[i].alpha));
    }
  /*
    Level image: apply LevelPixel() to every updatable channel sample.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      /*
        Masked pixels are skipped unchanged.
      */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma,
          (double) q[j]));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelImage)
#endif
        proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Clamp any out-of-range samples produced by the gamma adjustment.
  */
  (void) ClampImage(image,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelizeImage() applies the reversed LevelImage() operation to just
% the specific channels specified. It compresses the full range of color
% values, so that they lie between the given black and white points. Gamma is
% applied before the values are mapped.
%
% LevelizeImage() can be called by using a +level command line
% API option, or by using a '!' on a -level or LevelImage() geometry string.
%
% It can be used to de-contrast a greyscale image to the exact levels
% specified. Or by using specific levels for each channel of an image you
% can convert a gray-scale image to any linear color gradient, according to
% those levels.
%
% The format of the LevelizeImage method is:
%
% MagickBooleanType LevelizeImage(Image *image,const double black_point,
% const double white_point,const double gamma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: The level to map zero (black) to.
%
% o white_point: The level to map QuantumRange (white) to.
%
% o gamma: adjust gamma by this factor before mapping values.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag "Levelize/Image"
/*
  Inverse of LevelPixel(): gamma is applied first, then the normalized value
  is compressed into [black_point,white_point].  The result is clamped here,
  so no trailing ClampImage() is needed (unlike LevelImage()).
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      /* Masked pixels are skipped entirely. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelizeImage)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L e v e l I m a g e C o l o r s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LevelImageColors() maps the given color to "black" and "white" values,
% linearly spreading out the colors, and level values on a channel by channel
% basis, as per LevelImage(). The given colors allow you to specify
% different level ranges for each of the color channels separately.
%
% If the boolean 'invert' is set true the image values will be modified in the
% reverse direction. That is any existing "black" and "white" colors in the
% image will become the color values given, with all other values compressed
% appropriately. This effectively maps a greyscale gradient into the given
% color gradient.
%
% The format of the LevelImageColors method is:
%
% MagickBooleanType LevelImageColors(Image *image,
% const PixelInfo *black_color,const PixelInfo *white_color,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_color: The color to map black to/from
%
% o white_color: The color to map white to/from
%
% o invert: if true map the colors (levelize), rather than from (level)
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelImageColors(Image *image,
  const PixelInfo *black_color,const PixelInfo *white_color,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
  ChannelType
    channel_mask;

  MagickBooleanType
    (*level_method)(Image *,const double,const double,const double,
      ExceptionInfo *);

  MagickStatusType
    status;

  /*
    Level (or, when invert is set, levelize) each updatable channel between
    the corresponding components of the given black and white colors.  The
    channel mask is narrowed to one channel at a time and restored after
    each call.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsGrayColorspace(black_color->colorspace) == MagickFalse) ||
       (IsGrayColorspace(white_color->colorspace) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  status=MagickTrue;
  level_method=(invert == MagickFalse) ? LevelImage : LevelizeImage;
  if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
    {
      channel_mask=SetImageChannelMask(image,RedChannel);
      status&=level_method(image,black_color->red,white_color->red,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
    {
      channel_mask=SetImageChannelMask(image,GreenChannel);
      status&=level_method(image,black_color->green,white_color->green,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
    {
      channel_mask=SetImageChannelMask(image,BlueChannel);
      status&=level_method(image,black_color->blue,white_color->blue,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
      (image->colorspace == CMYKColorspace))
    {
      channel_mask=SetImageChannelMask(image,BlackChannel);
      status&=level_method(image,black_color->black,white_color->black,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
      (image->alpha_trait != UndefinedPixelTrait))
    {
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      status&=level_method(image,black_color->alpha,white_color->alpha,1.0,
        exception);
      (void) SetImageChannelMask(image,channel_mask);
    }
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i n e a r S t r e t c h I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% LinearStretchImage() discards any pixels below the black point and above
% the white point and levels the remaining pixels.
%
% The format of the LinearStretchImage method is:
%
% MagickBooleanType LinearStretchImage(Image *image,
% const double black_point,const double white_point,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o black_point: the black point.
%
% o white_point: the white point.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag "LinearStretch/Image"

  CacheView
    *image_view;

  double
    *histogram,
    intensity;

  MagickBooleanType
    status;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.  black_point and white_point are
    cumulative pixel COUNTS, not intensities: the stretch discards that many
    of the darkest (resp. brightest) pixels before leveling the rest.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram.
  */
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Bin pixels by clamped intensity into MaxMap+1 buckets. */
      intensity=GetPixelIntensity(image,p);
      histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black and white point
    levels: accumulate from the dark end until black_point pixels are
    covered, and from the bright end until white_point pixels are covered.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /* Map the located [black,white] interval onto the full quantum range. */
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
    (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d u l a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModulateImage() lets you control the brightness, saturation, and hue
% of an image. Modulate represents the brightness, saturation, and hue
% as one parameter (e.g. 90,150,100). If the image colorspace is HSL, the
% modulation is lightness, saturation, and hue. For HWB, use blackness,
% whiteness, and hue. And for HCL, use chroma, luma, and hue.
%
% The format of the ModulateImage method is:
%
% MagickBooleanType ModulateImage(Image *image,const char *modulate,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o modulate: Define the percent change in brightness, saturation, and hue.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Modulate in HCL: scale chroma and luma by their percentages and shift
    the hue by (percent_hue-100)/200 of a revolution.
  */
  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  chroma*=percent_chroma*0.01;
  luma*=percent_luma*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Modulate in HCLp: scale chroma and luma by their percentages and shift
    the hue by (percent_hue-100)/200 of a revolution.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  chroma*=percent_chroma*0.01;
  luma*=percent_luma*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}
static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Modulate in HSB: scale saturation and brightness by their percentages
    and shift the hue by (percent_hue-100)/200 of a revolution.
  */
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  saturation*=percent_saturation*0.01;
  brightness*=percent_brightness*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}
static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  double
    hue,
    intensity,
    saturation;

  /*
    Modulate in HSI: scale saturation and intensity by their percentages
    and shift the hue by (percent_hue-100)/200 of a revolution.
  */
  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  saturation*=percent_saturation*0.01;
  intensity*=percent_intensity*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}
static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  double
    hue,
    lightness,
    saturation;

  /*
    Modulate in HSL: scale saturation and lightness by their percentages
    and shift the hue by (percent_hue-100)/200 of a revolution.
  */
  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  saturation*=percent_saturation*0.01;
  lightness*=percent_lightness*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}
static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  double
    hue,
    saturation,
    value;

  /*
    Modulate in HSV: scale saturation and value by their percentages and
    shift the hue by (percent_hue-100)/200 of a revolution.
  */
  ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
  saturation*=percent_saturation*0.01;
  value*=percent_value*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}
static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,double *red,
  double *green,double *blue)
{
  double
    blackness,
    hue,
    whiteness;

  /*
    Modulate in HWB: scale whiteness and blackness by their percentages and
    shift the hue by (percent_hue-100)/200 of a revolution.
  */
  ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
  whiteness*=percent_whiteness*0.01;
  blackness*=percent_blackness*0.01;
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}
static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Modulate in LCHab: scale luma and chroma by their percentages and shift
    the hue by (percent_hue-100)/200 of a revolution.
  */
  ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  luma*=percent_luma*0.01;
  chroma*=percent_chroma*0.01;
  ConvertLCHabToRGB(luma,chroma,hue,red,green,blue);
}
static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    chroma,
    hue,
    luma;

  /*
    Modulate in LCHuv: scale luma and chroma by their percentages and shift
    the hue by (percent_hue-100)/200 of a revolution.
  */
  ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue);
  hue+=fmod(percent_hue-100.0,200.0)/200.0;
  luma*=percent_luma*0.01;
  chroma*=percent_chroma*0.01;
  ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue);
}
MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate,
  ExceptionInfo *exception)
{
#define ModulateImageTag "Modulate/Image"

  CacheView
    *image_view;

  ColorspaceType
    colorspace;

  const char
    *artifact;

  double
    percent_brightness,
    percent_hue,
    percent_saturation;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickStatusType
    flags;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Initialize modulate table.  The modulate string is parsed as
    "brightness[,saturation[,hue]]" percentages; omitted values default to
    100 (no change).  The working colorspace defaults to HSL and may be
    overridden with the "modulate:colorspace" artifact.

    FIX: the per-pixel switch below previously disagreed with the colormap
    switch: it lacked a HSIColorspace case (HSI fell through to the HSL
    default) and grouped LCHColorspace with LCHuv instead of LCHab.  Both
    switches now apply identical mappings.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (modulate == (char *) NULL)
    return(MagickFalse);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  flags=ParseGeometry(modulate,&geometry_info);
  percent_brightness=geometry_info.rho;
  percent_saturation=geometry_info.sigma;
  if ((flags & SigmaValue) == 0)
    percent_saturation=100.0;
  percent_hue=geometry_info.xi;
  if ((flags & XiValue) == 0)
    percent_hue=100.0;
  colorspace=UndefinedColorspace;
  artifact=GetImageArtifact(image,"modulate:colorspace");
  if (artifact != (const char *) NULL)
    colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions,
      MagickFalse,artifact);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      double
        blue,
        green,
        red;

      /*
        Modulate image colormap.
      */
      red=(double) image->colormap[i].red;
      green=(double) image->colormap[i].green;
      blue=(double) image->colormap[i].blue;
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      image->colormap[i].red=red;
      image->colormap[i].green=green;
      image->colormap[i].blue=blue;
    }
  /*
    Modulate image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateModulateImage(image,percent_brightness,percent_hue,
    percent_saturation,colorspace,exception) != MagickFalse)
    return(MagickTrue);
#endif
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        blue,
        green,
        red;

      /* Masked pixels are skipped entirely. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      switch (colorspace)
      {
        case HCLColorspace:
        {
          ModulateHCL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HCLpColorspace:
        {
          ModulateHCLp(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSBColorspace:
        {
          ModulateHSB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSIColorspace:
        {
          ModulateHSI(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSLColorspace:
        default:
        {
          ModulateHSL(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HSVColorspace:
        {
          ModulateHSV(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case HWBColorspace:
        {
          ModulateHWB(percent_hue,percent_saturation,percent_brightness,
            &red,&green,&blue);
          break;
        }
        case LCHColorspace:
        case LCHabColorspace:
        {
          ModulateLCHab(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
        case LCHuvColorspace:
        {
          ModulateLCHuv(percent_brightness,percent_saturation,percent_hue,
            &red,&green,&blue);
          break;
        }
      }
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ModulateImage)
#endif
        proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e g a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NegateImage() negates the colors in the reference image. The grayscale
% option means that only grayscale values within the image are negated.
%
% The format of the NegateImage method is:
%
% MagickBooleanType NegateImage(Image *image,
% const MagickBooleanType grayscale,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o grayscale: If MagickTrue, only negate grayscale pixels within the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NegateImage(Image *image,
  const MagickBooleanType grayscale,ExceptionInfo *exception)
{
#define NegateImageTag "Negate/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Negate (complement against QuantumRange) every updatable channel.  When
    grayscale is set, only pixels whose red, green, and blue components are
    all equal are negated.

    FIX: the grayscale path previously ended with return(MagickTrue), which
    discarded pixel-cache failures recorded in 'status'; it now returns
    'status' like the non-grayscale path.  The same OpenMP pragma used by
    the non-grayscale loop is also applied for consistency.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Negate colormap.
      */
      if( grayscale != MagickFalse )
        if ((image->colormap[i].red != image->colormap[i].green) ||
            (image->colormap[i].green != image->colormap[i].blue))
          continue;
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange-image->colormap[i].red;
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange-image->colormap[i].green;
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange-image->colormap[i].blue;
    }
  /*
    Negate image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
  if( grayscale != MagickFalse )
    {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(progress,status) \
        magick_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            j;

          /* Skip masked pixels and pixels that are not gray. */
          if ((GetPixelWriteMask(image,q) == 0) ||
              IsPixelGray(image,q) != MagickFalse)
            {
              q+=GetPixelChannels(image);
              continue;
            }
          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel=GetPixelChannelChannel(image,j);
            PixelTrait traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[j]=QuantumRange-q[j];
          }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_NegateImage)
#endif
            proceed=SetImageProgress(image,NegateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
  /*
    Negate image.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=QuantumRange-q[j];
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_NegateImage)
#endif
        proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N o r m a l i z e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% The NormalizeImage() method enhances the contrast of a color image by
% mapping the darkest 0.15 percent of all pixels to black and the brightest
% 0.05 percent to white.
%
% The format of the NormalizeImage method is:
%
% MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType NormalizeImage(Image *image,
  ExceptionInfo *exception)
{
  double
    black_point,
    white_point;

  /*
    Normalize: contrast-stretch the image with fixed cumulative-histogram
    clip counts (0.15% of pixels at the dark end, 0.05% at the bright end —
    derived from the 0.0015/0.9995 factors below).  Argument validation
    added for consistency with every other entry point in this module.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  black_point=(double) image->columns*image->rows*0.0015;
  white_point=(double) image->columns*image->rows*0.9995;
  return(ContrastStretchImage(image,black_point,white_point,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i g m o i d a l C o n t r a s t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
% sigmoidal contrast algorithm. Increase the contrast of the image using a
% sigmoidal transfer function without saturating highlights or shadows.
% Contrast indicates how much to increase the contrast (0 is none; 3 is
% typical; 20 is pushing it); mid-point indicates where midtones fall in the
% resultant image (0 is white; 50% is middle-gray; 100% is black). Set
% sharpen to MagickTrue to increase the image contrast otherwise the contrast
% is reduced.
%
% The format of the SigmoidalContrastImage method is:
%
% MagickBooleanType SigmoidalContrastImage(Image *image,
% const MagickBooleanType sharpen,const double contrast,
% const double midpoint,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o sharpen: Increase or decrease image contrast.
%
% o contrast: strength of the contrast, the larger the number the more
% 'threshold-like' it becomes.
%
% o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
ImageMagick 6 has a version of this function which uses LUTs.
*/
/*
Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
constant" set to a.
The first version, based on the hyperbolic tangent tanh, when combined with
the scaling step, is an exact arithmetic clone of the sigmoid function
based on the logistic curve. The equivalence is based on the identity
1/(1+exp(-t)) = (1+tanh(t/2))/2
(http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the
scaled sigmoidal derivation is invariant under affine transformations of
the ordinate.
The tanh version is almost certainly more accurate and cheaper. The 0.5
factor in the argument is to clone the legacy ImageMagick behavior. The
reason for making the define depend on atanh even though it only uses tanh
has to do with the construction of the inverse of the scaled sigmoidal.
*/
/*
  Sigmoidal(): raw sigmoid with "slope constant" a and inflexion point b;
  the tanh and logistic forms are arithmetically equivalent (see the note
  above).
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
  Scaled sigmoidal function:
    ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
    ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )
  See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
  http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf. The limit
  of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
  zero. This is fixed below by exiting immediately when contrast is small,
  leaving the image (or colormap) unmodified. This appears to be safe because
  the series expansion of the logistic sigmoidal function around x=b is
  1/2-a*(b-x)/4+...
  so that the key denominator s(1)-s(0) is about a/4 (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) (                    \
  (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
  (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
Inverse of ScaledSigmoidal, used for +sigmoidal-contrast. Because b
may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
when creating a LUT from in gamut values, hence the branching. In
addition, HDRI may have out of gamut values.
InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
It is only a right inverse. This is unavoidable.
*/
/*
  Right inverse of ScaledSigmoidal: maps a scaled-sigmoidal output x back to
  the input that produced it.  The argument of atanh (resp. log) is clamped
  into its open domain because out-of-gamut / HDRI values can push it outside
  (-1,1) (resp. (0,1)).
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  /* Endpoint values used by the ScaledSigmoidal normalization. */
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  /* Undo the ScaledSigmoidal affine rescaling. */
  const double argument=(sig1-sig0)*x+sig0;
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      /* tanh branch: clamp into (-1,1), the domain of atanh. */
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  /* Invert Sigmoidal(a,b,.) = tanh((0.5*a)*(.-b)). */
  return(b+(2.0/a)*atanh(clamped));
#else
      /* logistic branch: clamp into (0,1), the domain of the logit. */
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  /* Invert Sigmoidal(a,b,.) = 1/(1+exp(a*(b-.))). */
  return(b-log(1.0/clamped-1.0)/a);
#endif
}
MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const double contrast,const double midpoint,
  ExceptionInfo *exception)
{
/* Per-sample transfer functions: forward (sharpen) and inverse, both scaled
   to quantum range and clamped back into gamut. */
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
  ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
  InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Sanity checks.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Side effect: may clamp values unless contrast<MagickEpsilon, in which
    case nothing is done.  (Also avoids the a->0 division by zero in
    ScaledSigmoidal; see the comment above its definition.)
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Sigmoidal-contrast enhance colormap.
  */
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      /* sharpen selects the forward transfer; otherwise apply its inverse. */
      if( sharpen != MagickFalse )
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) ScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) ScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) ScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) ScaledSig(
              image->colormap[i].alpha);
        }
      else
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) InverseScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) InverseScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) InverseScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
              image->colormap[i].alpha);
        }
    }
  /*
    Sigmoidal-contrast enhance image, one row per loop iteration
    (parallelized over rows when OpenMP is available).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* Another thread already failed; skip remaining work for this row. */
    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Masked pixels are skipped, not transformed. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if( sharpen != MagickFalse )
          q[i]=ScaledSig(q[i]);
        else
          q[i]=InverseScaledSig(q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SigmoidalContrastImage)
#endif
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
jacobi7_3_save.c | #define max(a,b) (((a) < (b))? (b) : (a))
#define min(a,b) (((a) < (b))? (a) : (b))
#define _TH_1 2
#include <omp.h>
#define Index3D(_nx,_ny,_i,_j,_k) ((_i)+_nx*((_j)+_ny*(_k)))
/*
 * 7-point Jacobi stencil sweep, machine-generated (Orio-style annotations)
 * with time skewing: the outer k_bk_1 loop runs over skewed z-blocks and the
 * OpenMP-parallel t loop recovers the time steps that intersect each block.
 * Ping-pong buffering between A0 and Anext is selected by the parity of t.
 *
 * NOTE(review): the parameters alpha, B, ldb and ldc, and the local
 * temp_ptr, are unused in this transformed version — presumably left over
 * from the original kernel signature; confirm against the generator input.
 * NOTE(review): fac is derived from A0[0], i.e. the first grid value doubles
 * as the grid-spacing parameter here — TODO confirm this convention.
 */
void jacobi7_3(const int nx,const int ny,int nz,const double alpha,double* A0,const int timesteps,const double* B,const int ldb,double* Anext,const int ldc) {
  double fac;
  double* temp_ptr;
  int i;int j;int k;int t;
  fac = 6.0/(A0[0]*A0[0]);
  double* l0;double* lnext;
  /* Block indices introduced by the tiling transformation. */
  int k_bk_1;
  int j_bk_2;
  int i_bk_3;
  /* Skewed z/time blocking: k_bk_1 spans nz-1 plus 16 per time step. */
  /*@;BEGIN(Nest2_group3=Nest)@*/for (k_bk_1=1; k_bk_1<nz-1+16*timesteps; k_bk_1+=8)
  {
    omp_set_num_threads(_TH_1);
    #pragma omp parallel
    {
      /* Each thread takes the time steps whose skewed window overlaps this
         z-block; bounds invert the skew k = 16*t + z. */
      /*@;BEGIN(Nest1=Nest)@*/#pragma omp for private(t,k,j,i,l0,lnext,j_bk_2,i_bk_3)
      for (t=max(0,(16+(k_bk_1-(nz-1)))/16); t<min(timesteps,(15+k_bk_1)/16); t+=1)
      {
        /* 32x32 spatial tiles in the y and x directions. */
        /*@;BEGIN(Nest3=Nest)@*/for (j_bk_2=1; j_bk_2<-1+ny; j_bk_2+=32)
        {
          /*@;BEGIN(Nest4=Nest)@*/for (i_bk_3=1; i_bk_3<-1+nx; i_bk_3+=32)
          {
            for (k=0; k<min(8,16*t+(-k_bk_1+(-1+nz))); k+=1)
            {
              for (j=0; j<min(32,-j_bk_2+(-1+ny)); j+=1)
              {
                for (i=0; i<min(32,-i_bk_3+(-1+nx)); i+=1)
                {
                  /* Even steps read A0 / write Anext; odd steps swap roles. */
                  if (t%2==0)
                  {
                    l0 = A0;
                    lnext = Anext;
                  }
                  else
                  {
                    lnext = A0;
                    l0 = Anext;
                  }
                  /* z index k+(k_bk_1-16*t) undoes the time skew. */
                  lnext[Index3D(nx,ny,i_bk_3+i,j_bk_2+j,k+(k_bk_1+-16*t))] = -(l0[Index3D(nx,ny,i_bk_3+i,j_bk_2+j,k+(k_bk_1+-16*t))]*fac)+(l0[Index3D(nx,ny,-1+(i_bk_3+i),j_bk_2+j,k+(k_bk_1+-16*t))]+(l0[Index3D(nx,ny,1+(i_bk_3+i),j_bk_2+j,k+(k_bk_1+-16*t))]+(l0[Index3D(nx,ny,i_bk_3+i,-1+(j_bk_2+j),k+(k_bk_1+-16*t))]+(l0[Index3D(nx,ny,i_bk_3+i,1+(j_bk_2+j),k+(k_bk_1+-16*t))]+(l0[Index3D(nx,ny,i_bk_3+i,j_bk_2+j,1+(k+(k_bk_1+-16*t)))]+l0[Index3D(nx,ny,i_bk_3+i,j_bk_2+j,-1+(k+(k_bk_1+-16*t)))])))));
                }
              }
            }
          }
        }
      }
    }
  }
}
|
copyprivate-clause.c | #include <stdio.h>
#include <omp.h>
/*
 * Demonstrates the OpenMP `single` construct with the `copyprivate` clause:
 * exactly one thread of the team executes the single block (reading a value
 * from stdin), and copyprivate(a) broadcasts that thread's private `a` to
 * the private copy of every other thread in the team.
 *
 * Returns 0 on success.
 */
int main(void) {
    int n = 9, i, b[n];

    for (i = 0; i < n; i++)
        b[i] = -1;

    #pragma omp parallel
    {
        int a;  /* private to each thread */

        /*
         * BUG FIX: the copyprivate(a) clause was missing, so only the
         * thread that executed the single block had `a` initialized; every
         * other thread then filled b[] from an indeterminate value.
         */
        #pragma omp single copyprivate(a)
        {
            printf("\nIntroduce valor de inicialización a: ");
            if (scanf("%d", &a) != 1)
                a = 0;  /* keep `a` defined on bad/missing input */
            printf("\nSingle ejecutada por el thread %d\n",omp_get_thread_num());
        }

        /* All threads now share the same broadcast value of `a`. */
        #pragma omp for
        for (i = 0; i < n; i++)
            b[i] = a;
    }

    printf("Depués de la región parallel:\n");
    for (i = 0; i < n; i++)
        printf("b[%d] = %d\t", i, b[i]);
    printf("\n");
    return 0;
}
|
flag.c | #include "LBDefinitions.h"
#include "helper.h"
#include "flag.h"
#include "computeCellValues.h"
#include <omp.h>
void makeAvgDistFn(float * collideField, int * flagField, int * n, int * cell) {
    /*
    A GAS cell that is promoted to INTERFACE needs an initial distribution function, which
    is calculated via f_eq(rho_avg, v_avg),
    where v_avg, rho_avg are averaged from the neighboring FLUID and INTERFACE cells.

    collideField    An array of DFs for each cell in the domain, excluding boundary cells
    flagField       An array of flags {FLUID, INTERFACE, GAS, ...} for each cell
    n               The dimensions of the domain, including boundary cells
    cell            The coordinates of the cell in need of a DF
    */
    int i, neighbor[3], nNeighbors, flag;
    float density, density_avg, velocity[D], velocity_avg[D], * cellDF, * neighborDF, nNeighbors_inv;

    cellDF = getEl(collideField, cell, 0, n);
    nNeighbors = 0;
    density_avg = 0;
    velocity_avg[0] = 0;
    velocity_avg[1] = 0;
    velocity_avg[2] = 0;

    // for each i <- lattice direction
    for (i = 0; i < Q; i++) {
        // Retrieve coordinates of neighbor in direction i
        neighbor[0] = cell[0] + LATTICEVELOCITIES[i][0];
        neighbor[1] = cell[1] + LATTICEVELOCITIES[i][1];
        neighbor[2] = cell[2] + LATTICEVELOCITIES[i][2];

        // Do not overstep boundaries
        if (neighbor[0] < 1 || neighbor[0] > n[0]-2 || neighbor[1] == 0 ||
            neighbor[1] == n[1] || neighbor[2] == 0 || neighbor[2] == n[2]) {
            continue;
        }

        // Only FLUID and INTERFACE neighbors contribute to the average
        flag = *getFlag(flagField, neighbor, n);
        if (flag != FLUID && flag != INTERFACE) {
            continue;
        }

        // Retrieve distribution function of that neighbor
        neighborDF = getEl(collideField, neighbor, 0, n);

        // Extract density, velocity from that neighbor and accumulate
        computeDensity(neighborDF, &density);
        computeVelocity(neighborDF, &density, velocity);
        nNeighbors++;
        density_avg += density;
        velocity_avg[0] += velocity[0];
        velocity_avg[1] += velocity[1];
        velocity_avg[2] += velocity[2];
    }

    /* BUG FIX: previously nNeighbors == 0 caused a division by zero and a
       NaN/inf distribution function.  With no FLUID/INTERFACE neighbors
       there is nothing to average; leave the cell's DF untouched. */
    if (nNeighbors == 0) {
        return;
    }

    nNeighbors_inv = 1.0 / nNeighbors;
    density_avg *= nNeighbors_inv;
    velocity_avg[0] *= nNeighbors_inv;
    velocity_avg[1] *= nNeighbors_inv;
    velocity_avg[2] *= nNeighbors_inv;

    /* Set cell DF as f_eq with average density and velocity */
    computeFeq(&density_avg, velocity_avg, cellDF);
}
/**
Remove cell from emptied cell list
*/
/**
    Remove cell from emptied cell list.

    emptiedCells    ?x3 list of cell coordinates
    nEmptied        number of entries in emptiedCells
    targetCell      coordinates of the cell to remove

    The matching entry is marked as deleted by setting all three of its
    coordinates to -1 (coordinates are otherwise non-negative).  If the
    target is not present, the list is left unchanged.
*/
void removeFromEmptyList(int ** emptiedCells, int * nEmptied, int * targetCell) {
    int j = 0;
    while (j < *nEmptied && !(
            emptiedCells[j][0] == targetCell[0] &&
            emptiedCells[j][1] == targetCell[1] &&
            emptiedCells[j][2] == targetCell[2])) {
        j++;
    }
    /* BUG FIX: when the target was absent, the original wrote to
       emptiedCells[*nEmptied], one past the end of the list. */
    if (j == *nEmptied) {
        return;
    }
    /* BUG FIX: the original assigned emptiedCells[j][0] three times,
       leaving coordinates [1] and [2] untouched. */
    emptiedCells[j][0] = -1;
    emptiedCells[j][1] = -1;
    emptiedCells[j][2] = -1;
}
void performFill(float * collideField, int * flagField, int * n, int ** filledCells, int nFilled, int ** emptiedCells, int * nEmptied, int n_threads) {
    /*
    For collections of interface cells that get emptied or filled, examine the neighboring cells
    and update their flags to maintain the integrity of the interface layer. For example, if a cell
    is updated to FLUID, any neighboring GAS cells become INTERFACE.

    collideField    An array of DFs for each cell in the domain
    flagField       An array of flags <- {FLUID, INTERFACE, GAS, ...} for each cell in domain
    n               The dimensions of flagField
    filledCells     An ?x3 array containing coordinates of cells which have just been filled
    nFilled         The length of filledCells
    emptiedCells    An ?x3 array containing coordinates of cells which have just been emptied
    nEmptied        The length of emptiedCells

    NOTE(review): iterations run in parallel but write shared state — two
    threads may promote the same GAS neighbor and both call makeAvgDistFn /
    removeFromEmptyList on the shared emptiedCells list concurrently.  This
    looks like a data race; confirm whether filled cells are guaranteed to
    be far enough apart, or add synchronization.
    */
    int i, k, neighbor[3], *flag;

    // for each k <- cell that has been updated
    #pragma omp parallel for schedule(dynamic) private(i, neighbor, flag) num_threads(n_threads)
    for (k = 0; k < nFilled; k++) {
        // Update the cell's own flag
        *getFlag(flagField, filledCells[k], n) = FLUID;

        // Update each neighbor to ensure mesh integrity
        for (i = 0; i < Q; i++) { // for each i <- lattice direction
            // Retrieve coordinates of neighbor in direction i
            neighbor[0] = filledCells[k][0] + LATTICEVELOCITIES[i][0];
            neighbor[1] = filledCells[k][1] + LATTICEVELOCITIES[i][1];
            neighbor[2] = filledCells[k][2] + LATTICEVELOCITIES[i][2];

            // Check if neighbor is on the domain boundary, in which case we ignore it
            // TODO: See if this actually makes things faster, if not, delete it.
            //       Unless someone sets the FLUID flag as a domain boundary condition,
            //       which would probably cause lots of problems, this won't do anything.
            if (neighbor[0] == 0 || neighbor[0] == n[0] || neighbor[1] == 0 ||
                neighbor[1] == n[1] || neighbor[2] == 0 || neighbor[2] == n[2]) {
                continue;
            }

            // Retrieve the flag corresponding to neighbor
            flag = getFlag(flagField, neighbor, n);

            // If neighbor needs to be updated: a FLUID cell may not touch GAS,
            // so the GAS neighbor becomes part of the interface layer
            if (*flag == GAS) {
                *flag = INTERFACE;
                // update distribution function from average of neighbors
                makeAvgDistFn(collideField, flagField, n, neighbor);
                // Remove this neighbor from 'empty' list: it can no longer be emptied
                removeFromEmptyList(emptiedCells, nEmptied, neighbor);
            }
        }
    }
}
void performEmpty(float * collideField, int * flagField, int * n, int ** updatedCells, int nUpdated, int n_threads) {
    /*
    For collections of interface cells that get emptied, examine the neighboring cells
    and update their flags to maintain the integrity of the interface layer: a GAS cell
    may not touch FLUID directly, so FLUID neighbors of an emptied cell become INTERFACE.

    collideField    An array of DFs for each cell in the domain (unused here; kept for symmetry with performFill)
    flagField       An array of flags <- {FLUID, INTERFACE, GAS, ...} for each cell in domain
    n               The dimensions of flagField
    updatedCells    An ?x3 array containing coordinates of cells which have just been emptied;
                    entries whose first coordinate is -1 were cancelled by performFill and are skipped
    nUpdated        The length of updatedCells

    NOTE(review): neighbor flags are written from multiple threads without
    synchronization; benign if all writers store the same value, but confirm.
    */
    int i, k, neighbor[3], * flag;

    #pragma omp parallel for schedule(dynamic) private(i, neighbor, flag) num_threads(n_threads)
    // for each k <- cell that has been updated
    for (k = 0; k < nUpdated; k++) {
        // Entries deleted by removeFromEmptyList() are marked with -1
        if (updatedCells[k][0] == -1) continue;
        * getFlag(flagField, updatedCells[k], n) = GAS;

        // Heal interface by updating neighbors
        for (i = 0; i < Q; i++) { // for each i <- lattice direction
            // Retrieve coordinates of neighbor in direction i
            neighbor[0] = updatedCells[k][0] + LATTICEVELOCITIES[i][0];
            neighbor[1] = updatedCells[k][1] + LATTICEVELOCITIES[i][1];
            neighbor[2] = updatedCells[k][2] + LATTICEVELOCITIES[i][2];

            // Check if neighbor is on the domain boundary, in which case we ignore it
            // TODO: See if this actually makes things faster, if not, delete it.
            //       Unless someone sets the FLUID flag as a domain boundary condition,
            //       which would probably cause lots of problems, this won't do anything.
            if (neighbor[0] == 0 || neighbor[0] == n[0] || neighbor[1] == 0 ||
                neighbor[1] == n[1] || neighbor[2] == 0 || neighbor[2] == n[2]) {
                continue;
            }

            // Retrieve the flag corresponding to neighbor
            flag = getFlag(flagField, neighbor, n);

            // Swap out flag: FLUID next to the new GAS cell must become INTERFACE
            if (*flag == FLUID) {
                *flag = INTERFACE;
            }
        }
    }
}
/*
 * Scan every inner cell of the domain and reclassify INTERFACE cells whose
 * fluid fraction has left the valid range: fraction > 1+eps -> filled
 * (becomes FLUID), fraction < -eps -> emptied (becomes GAS).  The collected
 * coordinate lists are then handed to performFill / performEmpty, which
 * repair the interface layer around the reclassified cells.
 */
void updateFlagField(float * collideField, int * flagField, float * fractionField, int ** filledCells, int ** emptiedCells, int * length, int n_threads) {
    int ix, iy, iz;
    int coord[3];
    int nFilled = 0;
    int nEmptied = 0;
    float fraction;
    const float eps = 1e-3;
    /* Domain size including the one-cell boundary layer on each side. */
    int dims[3] = { length[0] + 2, length[1] + 2, length[2] + 2 };

    for (iz = 1; iz <= length[2]; iz++) {
        coord[2] = iz;
        for (iy = 1; iy <= length[1]; iy++) {
            coord[1] = iy;
            for (ix = 1; ix <= length[0]; ix++) {
                coord[0] = ix;
                /* Only INTERFACE cells can change state here. */
                if (*getFlag(flagField, coord, dims) != INTERFACE)
                    continue;
                fraction = *getFraction(fractionField, coord, dims);
                if (fraction > 1 + eps) {
                    filledCells[nFilled][0] = coord[0];
                    filledCells[nFilled][1] = coord[1];
                    filledCells[nFilled][2] = coord[2];
                    nFilled++;
                } else if (fraction < -eps) {
                    emptiedCells[nEmptied][0] = coord[0];
                    emptiedCells[nEmptied][1] = coord[1];
                    emptiedCells[nEmptied][2] = coord[2];
                    nEmptied++;
                }
            }
        }
    }

    /* Repair the interface layer around the filled and emptied cells. */
    performFill(collideField, flagField, dims, filledCells, nFilled, emptiedCells, &nEmptied, n_threads);
    performEmpty(collideField, flagField, dims, emptiedCells, nEmptied, n_threads);
}
|
2842.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
/* Array initialization: fill A with the cheap deterministic pattern
 * (i + j) / nj so runs are reproducible without an RNG. */
static
void init_array (int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  int row, col;

  for (row = 0; row < ni; row++)
    for (col = 0; col < nj; col++)
      A[row][col] = ((DATA_TYPE) (row + col) / nj);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Prints every element of B to stderr, inserting a newline at every
   20th flattened index to keep lines short. */
static
void print_array(int ni, int nj,
		 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int r, c;

  for (r = 0; r < ni; r++)
    for (c = 0; c < nj; c++)
      {
	fprintf(stderr, DATA_PRINTF_MODIFIER, B[r][c]);
	if ((r * NJ + c) % 20 == 0)
	  fprintf(stderr, "\n");
      }
  fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel: 3x3 convolution over the interior of A into B.
   The whole function will be timed, including the call and return.
   The outer loop is distributed across teams on the target device; the
   inner loop is shared among each team's threads. */
static
void kernel_conv2d(int ni,
		   int nj,
		   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
		   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
  #pragma omp target teams distribute private(j)
  for (i = 1; i < _PB_NI - 1; ++i)
  {
    /* BUG FIX: this was a bare "#pragma omp", which names no directive and
       is ignored, leaving the inner loop entirely sequential per team.
       "parallel for" shares the j iterations among the team's threads. */
    #pragma omp parallel for
    for (j = 1; j < _PB_NJ - 1; ++j)
    {
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
	+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
	+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
}
/* Driver: allocate, initialize, time the kernel, and print live-out data
   to stderr so dead-code elimination cannot remove the computation. */
int main(int argc, char** argv)
{
  /* Retrieve problem size (compile-time constants from convolution-2d.h). */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Subtract the `struct timeval' values X and Y, storing the result in
 * RESULT (the classic glibc-manual algorithm).  NOTE: *y is used as
 * scratch space and is modified by the call.
 *
 * Returns 1 if the difference is negative, otherwise 0.
 */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds so that x->tv_usec >= y->tv_usec afterwards. */
  if (x->tv_usec < y->tv_usec) {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }

  /* Carry in the other direction when the microsecond gap exceeds 1s. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }

  /* After normalization tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative difference iff the (adjusted) seconds compare this way. */
  return x->tv_sec < y->tv_sec;
}
/* Driver for the order-1 3D 7-point variable-coefficient stencil:
 * parse the grid size, allocate the two time levels and the seven
 * coefficient arrays, run TESTS timed sweeps, report, and free memory. */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  /* BUG FIX: Nx/Ny/Nz/Nt were read uninitialized (undefined behavior)
   * whenever fewer than four arguments were supplied; require all four. */
  if (argc < 5) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;  /* +2: one ghost layer on each side */
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  // allocate the arrays: A holds the two time levels of the grid
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // one coefficient array per stencil point
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 8;
  tile_size[3] = 512;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables with a fixed seed for reproducibility
  // NOTE(review): as in the original benchmark, the index-0 planes and all
  // of A[1] are left uninitialized; the first sweep reads those border
  // values.  Preserved here to keep results comparable — TODO confirm.
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i  ][j  ][k  ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j  ][k  ] +
                                  coef[2][i][j][k] * A[t%2][i  ][j-1][k  ] +
                                  coef[3][i][j][k] * A[t%2][i  ][j  ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j  ][k  ] +
                                  coef[5][i][j][k] * A[t%2][i  ][j+1][k  ] +
                                  coef[6][i][j][k] * A[t%2][i  ][j  ][k+1];
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* BUG FIX: this used lowercase `min`, which is not defined in this
     * file; use the MIN macro declared above. */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);          /* BUG FIX: top-level pointer was leaked */
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);       /* BUG FIX: top-level pointer was leaked */
  free(tile_size);  /* BUG FIX: was leaked */

  return 0;
}
|
21_set_threads_in_program.c | #include <stdio.h>
#include <omp.h>
/* Minimal OpenMP demo: set the team size programmatically, then have each
 * thread in the parallel region announce its thread number. */
int main()
{
    printf("Hello world from the main program\n");

    /* Request four threads for subsequent parallel regions. */
    omp_set_num_threads(4);

    #pragma omp parallel
    {
        /* Runs once per thread; output order is unspecified. */
        printf("Hello world! I am processor %i\n", omp_get_thread_num());
    }

    return 0;
}
|
ParticleBConds3DSoa.h | //////////////////////////////////////////////////////////////////////////////////////
// This file is distributed under the University of Illinois/NCSA Open Source License.
// See LICENSE file in top directory for details.
//
// Copyright (c) 2016 Jeongnim Kim and QMCPACK developers.
//
// File developed by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
// Amrita Mathuriya, amrita.mathuriya@intel.com, Intel Corp.
//
// File created by: Jeongnim Kim, jeongnim.kim@intel.com, Intel Corp.
//////////////////////////////////////////////////////////////////////////////////////
// -*- C++ -*-
#ifndef QMCPLUSPLUS_PARTICLE_BCONDS_3D_SOA_H
#define QMCPLUSPLUS_PARTICLE_BCONDS_3D_SOA_H
#include <config.h>
#include <algorithm>
#include <Lattice/CrystalLattice.h>
#include <OhmmsSoA/VectorSoaContainer.h>
namespace qmcplusplus
{
/** specialization for an open 3D
*/
template<class T>
struct DTD_BConds<T, 3, SUPERCELL_OPEN + SOA_OFFSET>
{
/** constructor: doing nothing */
inline DTD_BConds(const CrystalLattice<T, 3>& lat) {}
template<typename PT, typename RSoA>
void computeDistances(const PT& pos,
const RSoA& R0,
T* restrict temp_r,
RSoA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
dx[iat] = px[iat] - x0;
dy[iat] = py[iat] - y0;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
};
/** specialization for a periodic 3D, orthorombic cell
*/
template<class T>
struct DTD_BConds<T, 3, PPPO + SOA_OFFSET>
{
T Linv0, L0, Linv1, L1, Linv2, L2, r2max, dummy;
inline DTD_BConds(const CrystalLattice<T, 3>& lat)
: Linv0(lat.OneOverLength[0]),
L0(lat.Length[0]),
Linv1(lat.OneOverLength[1]),
L1(lat.Length[1]),
Linv2(lat.OneOverLength[2]),
L2(lat.Length[2]),
r2max(lat.CellRadiusSq),
dummy(T())
{}
template<typename PT, typename RSoA>
void computeDistances(const PT& pos,
const RSoA& R0,
T* restrict temp_r,
RSoA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
const T x = (px[iat] - x0) * Linv0;
const T y = (py[iat] - y0) * Linv1;
const T z = (pz[iat] - z0) * Linv2;
dx[iat] = L0 * (x - round(x));
dy[iat] = L1 * (y - round(y));
dz[iat] = L2 * (z - round(z));
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
};
/** specialization for a periodic 3D general cell with wigner-seitz==simulation cell
*
* Skip image cells.
*/
template<class T>
struct DTD_BConds<T, 3, PPPS + SOA_OFFSET>
{
  // Cached elements of the lattice matrix R (unit -> cartesian) ...
  T r00, r10, r20, r01, r11, r21, r02, r12, r22;
  // ... and of its inverse G (cartesian -> unit/fractional).
  T g00, g10, g20, g01, g11, g21, g02, g12, g22;

  DTD_BConds(const CrystalLattice<T, 3>& lat)
      : r00(lat.R(0)),
        r10(lat.R(3)),
        r20(lat.R(6)),
        r01(lat.R(1)),
        r11(lat.R(4)),
        r21(lat.R(7)),
        r02(lat.R(2)),
        r12(lat.R(5)),
        r22(lat.R(8)),
        g00(lat.G(0)),
        g10(lat.G(3)),
        g20(lat.G(6)),
        g01(lat.G(1)),
        g11(lat.G(4)),
        g21(lat.G(7)),
        g02(lat.G(2)),
        g12(lat.G(5)),
        g22(lat.G(8))
  {}

  /** Minimum-image distances for a general periodic cell whose
   *  Wigner-Seitz cell equals the simulation cell, so a single round()
   *  wrap in fractional coordinates suffices (no image-cell search;
   *  contrast with the PPPG specialization).  flip_ind is unused here.
   */
  template<typename PT, typename RSoA>
  void computeDistances(const PT& pos,
                        const RSoA& R0,
                        T* restrict temp_r,
                        RSoA& temp_dr,
                        int first,
                        int last,
                        int flip_ind = 0)
  {
    const T x0 = pos[0];
    const T y0 = pos[1];
    const T z0 = pos[2];

    const T* restrict px = R0.data(0);
    const T* restrict py = R0.data(1);
    const T* restrict pz = R0.data(2);

    T* restrict dx = temp_dr.data(0);
    T* restrict dy = temp_dr.data(1);
    T* restrict dz = temp_dr.data(2);

#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
    for (int iat = first; iat < last; ++iat)
    {
      // cartesian displacement
      T displ_0 = px[iat] - x0;
      T displ_1 = py[iat] - y0;
      T displ_2 = pz[iat] - z0;

      // cart2unit: project onto fractional coordinates via G
      T ar_0 = displ_0 * g00 + displ_1 * g10 + displ_2 * g20;
      T ar_1 = displ_0 * g01 + displ_1 * g11 + displ_2 * g21;
      T ar_2 = displ_0 * g02 + displ_1 * g12 + displ_2 * g22;

      //put them in the box (nearest-image wrap)
      ar_0 -= round(ar_0);
      ar_1 -= round(ar_1);
      ar_2 -= round(ar_2);

      //unit2cart: back to cartesian via R
      dx[iat] = ar_0 * r00 + ar_1 * r10 + ar_2 * r20;
      dy[iat] = ar_0 * r01 + ar_1 * r11 + ar_2 * r21;
      dz[iat] = ar_0 * r02 + ar_1 * r12 + ar_2 * r22;

      temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
    }
  }
};
/** specialization for a periodic 3D general cell
*
* Wigner-Seitz cell radius > simulation cell radius
* Need to check image cells
*/
template<class T>
struct DTD_BConds<T, 3, PPPG + SOA_OFFSET>
{
  // Inverse (cartesian -> fractional) of the reduced basis ...
  T g00, g10, g20, g01, g11, g21, g02, g12, g22;
  // ... and the reduced-basis vectors themselves (fractional -> cartesian).
  T r00, r10, r20, r01, r11, r21, r02, r12, r22;
  // The 8 corner translations (0 and negated sums of basis vectors) that
  // must be checked to locate the true nearest image.
  VectorSoaContainer<T, 3> corners;

  DTD_BConds(const CrystalLattice<T, 3>& lat)
  {
    // Reduce the lattice vectors to make the corner search valid/cheap.
    TinyVector<TinyVector<T, 3>, 3> rb;
    rb[0] = lat.a(0);
    rb[1] = lat.a(1);
    rb[2] = lat.a(2);
    find_reduced_basis(rb);
    r00 = rb[0][0];
    r10 = rb[1][0];
    r20 = rb[2][0];
    r01 = rb[0][1];
    r11 = rb[1][1];
    r21 = rb[2][1];
    r02 = rb[0][2];
    r12 = rb[1][2];
    r22 = rb[2][2];
    // Invert the reduced basis to obtain the fractional projection.
    Tensor<T, 3> rbt;
    for (int i = 0; i < 3; ++i)
      for (int j = 0; j < 3; ++j)
        rbt(i, j) = rb[i][j];
    Tensor<T, 3> g = inverse(rbt);
    g00             = g(0);
    g10             = g(3);
    g20             = g(6);
    g01             = g(1);
    g11             = g(4);
    g21             = g(7);
    g02             = g(2);
    g12             = g(5);
    g22             = g(8);
    // Precompute the candidate image translations.
    constexpr T minusone(-1);
    constexpr T zero(0);
    corners.resize(8);
    corners(0) = zero;
    corners(1) = minusone * (rb[0]);
    corners(2) = minusone * (rb[1]);
    corners(3) = minusone * (rb[2]);
    corners(4) = minusone * (rb[0] + rb[1]);
    corners(5) = minusone * (rb[0] + rb[2]);
    corners(6) = minusone * (rb[0] + rb[1] + rb[2]);  // NOTE(review): order differs below
    corners(7) = minusone * (rb[0] + rb[1] + rb[2]);
  }

  /** Distances for a general periodic cell whose Wigner-Seitz radius
   *  exceeds the simulation-cell radius: after a floor()-based wrap into
   *  the home cell, all 8 corner images are searched for the true minimum.
   *  NOTE(review): `flip` negates displacements for iat < flip_ind —
   *  presumably to preserve a sign convention in the distance table;
   *  confirm against the caller.
   */
  template<typename PT, typename RSoA>
  void computeDistances(const PT& pos,
                        const RSoA& R0,
                        T* restrict temp_r,
                        RSoA& temp_dr,
                        int first,
                        int last,
                        int flip_ind = 0)
  {
    const T x0 = pos[0];
    const T y0 = pos[1];
    const T z0 = pos[2];

    const T* restrict px = R0.data(0);
    const T* restrict py = R0.data(1);
    const T* restrict pz = R0.data(2);

    T* restrict dx = temp_dr.data(0);
    T* restrict dy = temp_dr.data(1);
    T* restrict dz = temp_dr.data(2);

    const T* restrict cellx = corners.data(0);
    ASSUME_ALIGNED(cellx);
    const T* restrict celly = corners.data(1);
    ASSUME_ALIGNED(celly);
    const T* restrict cellz = corners.data(2);
    ASSUME_ALIGNED(cellz);

    constexpr T minusone(-1);
    constexpr T one(1);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
    for (int iat = first; iat < last; ++iat)
    {
      const T flip    = iat < flip_ind ? one : minusone;
      const T displ_0 = (px[iat] - x0) * flip;
      const T displ_1 = (py[iat] - y0) * flip;
      const T displ_2 = (pz[iat] - z0) * flip;

      // wrap into the home cell via floor() of the fractional coordinates
      const T ar_0 = -std::floor(displ_0 * g00 + displ_1 * g10 + displ_2 * g20);
      const T ar_1 = -std::floor(displ_0 * g01 + displ_1 * g11 + displ_2 * g21);
      const T ar_2 = -std::floor(displ_0 * g02 + displ_1 * g12 + displ_2 * g22);

      const T delx = displ_0 + ar_0 * r00 + ar_1 * r10 + ar_2 * r20;
      const T dely = displ_1 + ar_0 * r01 + ar_1 * r11 + ar_2 * r21;
      const T delz = displ_2 + ar_0 * r02 + ar_1 * r12 + ar_2 * r22;

      // branch-free search over the 8 candidate images for the minimum r^2
      T rmin = delx * delx + dely * dely + delz * delz;
      int ic = 0;
#pragma unroll(7)
      for (int c = 1; c < 8; ++c)
      {
        const T x  = delx + cellx[c];
        const T y  = dely + celly[c];
        const T z  = delz + cellz[c];
        const T r2 = x * x + y * y + z * z;
        ic         = (r2 < rmin) ? c : ic;
        rmin       = (r2 < rmin) ? r2 : rmin;
      }
      temp_r[iat] = std::sqrt(rmin);
      // undo the flip so stored displacements keep the caller's convention
      dx[iat]     = flip * (delx + cellx[ic]);
      dy[iat]     = flip * (dely + celly[ic]);
      dz[iat]     = flip * (delz + cellz[ic]);
    }
  }
};
/** specialization for a slab, general cell
*/
template<class T>
struct DTD_BConds<T, 3, PPNG + SOA_OFFSET>
{
  // 2x2 in-plane blocks of the fractional projection G and basis R;
  // the z direction is open (slab geometry).
  T g00, g10, g01, g11;
  T r00, r10, r01, r11;
  TinyVector<TinyVector<T, 3>, 3> rb;
  // The 4 in-plane corner translations checked for the nearest image.
  VectorSoaContainer<T, 3> corners;

  DTD_BConds(const CrystalLattice<T, 3>& lat)
  {
    rb[0] = lat.a(0);
    rb[1] = lat.a(1);
    rb[2] = lat.a(2); //rb[2]=0.0;
    r00   = rb[0][0];
    r10   = rb[1][0];
    r01   = rb[0][1];
    r11   = rb[1][1];
    g00   = lat.G(0);
    g10   = lat.G(3);
    g01   = lat.G(1);
    g11   = lat.G(4);
    T minusone = -1.0;
    corners.resize(4);
    corners(0) = 0.0;
    corners(1) = minusone * (rb[0]);
    corners(2) = minusone * (rb[1]);
    corners(3) = minusone * (rb[0] + rb[1]);
  }

  /** Distances for a slab (periodic in x/y, open in z) with a general
   *  in-plane cell: floor()-based wrap in the plane, then a 4-corner
   *  image search; the z displacement is used unwrapped.
   *  NOTE(review): `flip` mirrors the PPPG convention for iat < flip_ind —
   *  confirm against the caller.
   */
  template<typename PT, typename RSoA>
  void computeDistances(const PT& pos,
                        const RSoA& R0,
                        T* restrict temp_r,
                        RSoA& temp_dr,
                        int first,
                        int last,
                        int flip_ind = 0)
  {
    const T x0 = pos[0];
    const T y0 = pos[1];
    const T z0 = pos[2];

    const T* restrict px = R0.data(0);
    const T* restrict py = R0.data(1);
    const T* restrict pz = R0.data(2);

    T* restrict dx = temp_dr.data(0);
    T* restrict dy = temp_dr.data(1);
    T* restrict dz = temp_dr.data(2);

    const T* restrict cellx = corners.data(0);
    ASSUME_ALIGNED(cellx);
    const T* restrict celly = corners.data(1);
    ASSUME_ALIGNED(celly);

    constexpr T minusone(-1);
    constexpr T one(1);

#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
    for (int iat = first; iat < last; ++iat)
    {
      const T flip    = iat < flip_ind ? one : minusone;
      const T displ_0 = (px[iat] - x0) * flip;
      const T displ_1 = (py[iat] - y0) * flip;
      const T delz    = pz[iat] - z0;  // open direction: no wrap, no flip

      // wrap the in-plane components into the home cell
      const T ar_0 = -std::floor(displ_0 * g00 + displ_1 * g10);
      const T ar_1 = -std::floor(displ_0 * g01 + displ_1 * g11);
      const T delx = displ_0 + ar_0 * r00 + ar_1 * r10;
      const T dely = displ_1 + ar_0 * r01 + ar_1 * r11;

      // branch-free search over the 4 in-plane images
      T rmin = delx * delx + dely * dely;
      int ic = 0;
#pragma unroll(3)
      for (int c = 1; c < 4; ++c)
      {
        const T x  = delx + cellx[c];
        const T y  = dely + celly[c];
        const T r2 = x * x + y * y;
        ic         = (r2 < rmin) ? c : ic;
        rmin       = (r2 < rmin) ? r2 : rmin;
      }
      temp_r[iat] = std::sqrt(rmin + delz * delz);
      dx[iat]     = flip * (delx + cellx[ic]);
      dy[iat]     = flip * (dely + celly[ic]);
      dz[iat]     = delz;
    }
  }
};
/** specialization for a slab, orthorombic cell
*/
template<class T>
struct DTD_BConds<T, 3, PPNO + SOA_OFFSET>
{
T Linv0, L0, Linv1, L1;
inline DTD_BConds(const CrystalLattice<T, 3>& lat)
: Linv0(lat.OneOverLength[0]), L0(lat.Length[0]), Linv1(lat.OneOverLength[1]), L1(lat.Length[1])
{}
template<typename PT, typename RSoA>
void computeDistances(const PT& pos,
const RSoA& R0,
T* restrict temp_r,
RSoA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T x = (px[iat] - x0) * Linv0;
dx[iat] = L0 * (x - round(x));
T y = (py[iat] - y0) * Linv1;
dy[iat] = L1 * (y - round(y));
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
};
/** specialization for a slab: general (non-orthorhombic) cell, periodic a-b plane
 */
template<class T>
struct DTD_BConds<T, 3, PPNS + SOA_OFFSET>
{
  T r00, r10, r01, r11; // in-plane cell-matrix elements
  T g00, g10, g01, g11; // in-plane reciprocal-matrix elements

  DTD_BConds(const CrystalLattice<T, 3>& lat)
      : r00(lat.R(0)),
        r10(lat.R(3)),
        r01(lat.R(1)),
        r11(lat.R(4)),
        g00(lat.G(0)),
        g10(lat.G(3)),
        g01(lat.G(1)),
        g11(lat.G(4))
  {}

  /** distances/displacements from pos to R0[first..last); a-b wrapped, z open */
  template<typename PT, typename RSoA>
  void computeDistances(const PT& pos,
                        const RSoA& R0,
                        T* restrict temp_r,
                        RSoA& temp_dr,
                        int first,
                        int last,
                        int flip_ind = 0)
  {
    const T x0 = pos[0];
    const T y0 = pos[1];
    const T z0 = pos[2];
    const T* restrict px = R0.data(0);
    const T* restrict py = R0.data(1);
    const T* restrict pz = R0.data(2);
    T* restrict dx = temp_dr.data(0);
    T* restrict dy = temp_dr.data(1);
    T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
    for (int ip = first; ip < last; ++ip)
    {
      const T del0 = px[ip] - x0;
      const T del1 = py[ip] - y0;
      // cart2unit: fractional coordinates in the a-b plane
      T u0 = del0 * g00 + del1 * g10;
      T u1 = del0 * g01 + del1 * g11;
      // wrap into the home cell
      u0 -= round(u0);
      u1 -= round(u1);
      // unit2cart
      dx[ip]     = u0 * r00 + u1 * r10;
      dy[ip]     = u0 * r01 + u1 * r11;
      dz[ip]     = pz[ip] - z0; // z stays unwrapped (slab)
      temp_r[ip] = std::sqrt(dx[ip] * dx[ip] + dy[ip] * dy[ip] + dz[ip] * dz[ip]);
    }
  }
};
/** specialization for a wire
*/
template<class T>
struct DTD_BConds<T, 3, SUPERCELL_WIRE + SOA_OFFSET>
{
T Linv0, L0;
inline DTD_BConds(const CrystalLattice<T, 3>& lat) : Linv0(lat.OneOverLength[0]), L0(lat.Length[0]) {}
template<typename PT, typename RSoA>
void computeDistances(const PT& pos,
const RSoA& R0,
T* restrict temp_r,
RSoA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
const T x0 = pos[0];
const T y0 = pos[1];
const T z0 = pos[2];
const T* restrict px = R0.data(0);
const T* restrict py = R0.data(1);
const T* restrict pz = R0.data(2);
T* restrict dx = temp_dr.data(0);
T* restrict dy = temp_dr.data(1);
T* restrict dz = temp_dr.data(2);
#pragma omp simd aligned(temp_r, px, py, pz, dx, dy, dz)
for (int iat = first; iat < last; ++iat)
{
T x = (px[iat] - x0) * Linv0;
dx[iat] = L0 * (x - round(x));
dy[iat] = py[iat] - y0;
dz[iat] = pz[iat] - z0;
temp_r[iat] = std::sqrt(dx[iat] * dx[iat] + dy[iat] * dy[iat] + dz[iat] * dz[iat]);
}
}
};
/** specialization for a periodic 3D general cell
 *
 * Slow method and not used unless one needs to check if faster methods fail
 */
template<class T>
struct DTD_BConds<T, 3, PPPX + SOA_OFFSET>
{
  T r00, r10, r20, r01, r11, r21, r02, r12, r22; // cell-matrix elements
  T g00, g10, g20, g01, g11, g21, g02, g12, g22; // reciprocal-matrix elements
  T r2max;                                       // squared simulation-cell radius
  VectorSoaContainer<T, 3> nextcells;            // offsets of the 26 neighbor cells

  DTD_BConds(const CrystalLattice<T, 3>& lat)
      : r00(lat.R(0)),
        r10(lat.R(3)),
        r20(lat.R(6)),
        r01(lat.R(1)),
        r11(lat.R(4)),
        r21(lat.R(7)),
        r02(lat.R(2)),
        r12(lat.R(5)),
        r22(lat.R(8)),
        g00(lat.G(0)),
        g10(lat.G(3)),
        g20(lat.G(6)),
        g01(lat.G(1)),
        g11(lat.G(4)),
        g21(lat.G(7)),
        g02(lat.G(2)),
        g12(lat.G(5)),
        g22(lat.G(8)),
        r2max(lat.CellRadiusSq)
  {
    // Tabulate the Cartesian offsets of the 26 cells surrounding the home cell.
    nextcells.resize(26);
    T* restrict cellx = nextcells.data(0);
    T* restrict celly = nextcells.data(1);
    T* restrict cellz = nextcells.data(2);
    int ic = 0;
    for (int i = -1; i <= 1; ++i)
      for (int j = -1; j <= 1; ++j)
        for (int k = -1; k <= 1; ++k)
        {
          // BUGFIX: was `i == 0 && j == 0 && j == 0`, which skipped the whole
          // i==0,j==0 column (3 cells) and left the last two entries of
          // nextcells uninitialized. Only the home cell (0,0,0) is excluded.
          if (i == 0 && j == 0 && k == 0)
            continue; //exclude zero
          cellx[ic] = i * r00 + j * r10 + k * r20;
          celly[ic] = i * r01 + j * r11 + k * r21;
          cellz[ic] = i * r02 + j * r12 + k * r22;
          ++ic;
        }
  }

  /** not implemented for this boundary-condition variant: aborts at run time */
  template<typename PT, typename RSoA>
  void computeDistances(const PT& pos,
                        const RSoA& R0,
                        T* restrict temp_r,
                        RSoA& temp_dr,
                        int first,
                        int last,
                        int flip_ind = 0)
  {
    APP_ABORT("DTD_BConds<T,3,PPPX> not implemented");
  }
};
/** specialization for a slab, general cell
*/
template<class T>
struct DTD_BConds<T, 3, PPNX + SOA_OFFSET>
{
T r00, r10, r01, r11;
T g00, g10, g01, g11;
T r2max;
VectorSoaContainer<T, 3> nextcells;
DTD_BConds(const CrystalLattice<T, 3>& lat)
: r00(lat.R(0)),
r10(lat.R(3)),
r01(lat.R(1)),
r11(lat.R(4)),
g00(lat.G(0)),
g10(lat.G(3)),
g01(lat.G(1)),
g11(lat.G(4)),
r2max(lat.CellRadiusSq)
{
nextcells.resize(8);
T* restrict cellx = nextcells.data(0);
T* restrict celly = nextcells.data(1);
T* restrict cellz = nextcells.data(2);
int ic = 0;
for (int i = -1; i <= 1; ++i)
for (int j = -1; j <= 1; ++j)
{
if (i == 0 && j == 0)
continue; //exclude zero
cellx[ic] = i * r00 + j * r10;
celly[ic] = i * r01 + j * r11;
cellz[ic] = T();
++ic;
}
}
template<typename PT, typename RSoA>
void computeDistances(const PT& pos,
const RSoA& R0,
T* restrict temp_r,
RSoA& temp_dr,
int first,
int last,
int flip_ind = 0)
{
APP_ABORT("DTD_BConds<T,3,PPNX> not implemented");
}
};
} // namespace qmcplusplus
#endif // OHMMS_PARTICLE_BCONDS_3D_H
|
3DConvolution.c | /**
* 3DConvolution.c: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
#include <omp.h>
#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else
#include <CL/cl.h>
#endif
#include "./polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05
#define MAX_SOURCE_SIZE (0x100000)
/* Problem size */
#define NI 256
#define NJ 256
#define NK 256
/* Thread block dimensions */
#define DIM_LOCAL_WORK_GROUP_X 32
#define DIM_LOCAL_WORK_GROUP_Y 8
#if defined(cl_khr_fp64) // Khronos extension available?
#pragma OPENCL EXTENSION cl_khr_fp64 : enable
#elif defined(cl_amd_fp64) // AMD extension available?
#pragma OPENCL EXTENSION cl_amd_fp64 : enable
#endif
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
char str_temp[1024];
int cpu_offset = 0;
double total_time = 0;
cl_platform_id platform_id;
cl_device_id device_id;
cl_uint num_devices;
cl_uint num_platforms;
cl_int errcode;
cl_context clGPUContext;
cl_kernel clKernel;
cl_command_queue clCommandQue;
cl_program clProgram;
cl_mem a_mem_obj;
cl_mem b_mem_obj;
FILE *fp;
char *source_str;
size_t source_size;
/* CPU (OpenMP) computation of one i-slice of the 3D convolution.
 * NOTE(review): several taps reuse the (j-1, k-1) offsets (e.g. the c21/c31
 * terms); this mirrors conv3D and the OpenCL kernel, so it is kept as-is so
 * CPU and GPU results stay comparable. */
void Convolution3D_omp(DATA_TYPE* A, DATA_TYPE* B, int ni, int nj, int nk, int i) {
  /* 3x3 stencil coefficients, identical to the OpenCL kernel */
  const DATA_TYPE c11 = +2, c21 = +5, c31 = -8;
  const DATA_TYPE c12 = -3, c22 = +6, c32 = -9;
  const DATA_TYPE c13 = +4, c23 = +7, c33 = +10;
  /* hoisted slice bases for planes i-1, i, i+1 */
  const int im1 = (i - 1) * (nk * nj);
  const int ic0 = (i + 0) * (nk * nj);
  const int ip1 = (i + 1) * (nk * nj);
#pragma omp parallel for
  for (int k = 1; k < nk - 1; k++) {
    for (int j = 1; j < nj - 1; j++) {
      B[ic0 + j * nk + k] =
          c11 * A[im1 + (j - 1) * nk + (k - 1)] + c13 * A[ip1 + (j - 1) * nk + (k - 1)]
        + c21 * A[im1 + (j - 1) * nk + (k - 1)] + c23 * A[ip1 + (j - 1) * nk + (k - 1)]
        + c31 * A[im1 + (j - 1) * nk + (k - 1)] + c33 * A[ip1 + (j - 1) * nk + (k - 1)]
        + c12 * A[ic0 + (j - 1) * nk + k]       + c22 * A[ic0 + j * nk + k]
        + c32 * A[ic0 + (j + 1) * nk + k]       + c11 * A[im1 + (j - 1) * nk + (k + 1)]
        + c13 * A[ip1 + (j - 1) * nk + (k + 1)] + c21 * A[im1 + j * nk + (k + 1)]
        + c23 * A[ip1 + j * nk + (k + 1)]       + c31 * A[im1 + (j + 1) * nk + (k + 1)]
        + c33 * A[ip1 + (j + 1) * nk + (k + 1)];
    }
  }
}
/* Load the OpenCL kernel source "3DConvolution.cl" into the global
 * source_str/source_size (at most MAX_SOURCE_SIZE bytes).
 * Exits the program on failure. */
void read_cl_file()
{
  fp = fopen("3DConvolution.cl", "r");
  if (!fp) {
    fprintf(stderr, "Failed to load kernel.\n");
    exit(1);
  }
  source_str = (char*)malloc(MAX_SOURCE_SIZE);
  /* BUGFIX: malloc result was previously passed to fread unchecked */
  if (!source_str) {
    fprintf(stderr, "Failed to allocate kernel source buffer.\n");
    fclose(fp);
    exit(1);
  }
  source_size = fread(source_str, 1, MAX_SOURCE_SIZE, fp);
  fclose(fp);
}
/* Fill A (NI x NJ x NK, row-major) with a deterministic pattern so the CPU
 * and GPU paths start from identical input. */
void init(DATA_TYPE* A)
{
  for (int i = 0; i < NI; ++i)
    for (int j = 0; j < NJ; ++j)
      for (int k = 0; k < NK; ++k)
        A[i * (NK * NJ) + j * NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
}
/* Select the first GPU device of the first OpenCL platform and create the
 * global context and command queue. Errors are reported but not fatal. */
void cl_initialization()
{
// Get platform and device information
errcode = clGetPlatformIDs(1, &platform_id, &num_platforms);
if(errcode != CL_SUCCESS) printf("Error getting platform IDs\n");
// NOTE(review): only one GPU device is requested; multi-device setups ignored
errcode = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_GPU, 1, &device_id, &num_devices);
if(errcode != CL_SUCCESS) printf("Error getting device IDs\n");
// Create an OpenCL context
clGPUContext = clCreateContext( NULL, 1, &device_id, NULL, NULL, &errcode);
if(errcode != CL_SUCCESS) printf("Error in creating context\n");
//Create a command-queue
clCommandQue = clCreateCommandQueue(clGPUContext, device_id, 0, &errcode);
if(errcode != CL_SUCCESS) printf("Error in creating command queue\n");
}
/* Create the device buffers (full NI*NJ*NK volumes) and copy A and B to the
 * device with blocking writes. */
void cl_mem_init(DATA_TYPE* A, DATA_TYPE* B)
{
a_mem_obj = clCreateBuffer(clGPUContext, CL_MEM_READ_ONLY, sizeof(DATA_TYPE) * NI * NJ * NK, NULL, &errcode);
b_mem_obj = clCreateBuffer(clGPUContext, CL_MEM_READ_WRITE, sizeof(DATA_TYPE) * NI * NJ * NK, NULL, &errcode);
if(errcode != CL_SUCCESS) printf("Error in creating buffers\n");
// CL_TRUE: blocking writes, so A/B may be reused immediately after return
errcode = clEnqueueWriteBuffer(clCommandQue, a_mem_obj, CL_TRUE, 0, sizeof(DATA_TYPE) * NI * NJ * NK, A, 0, NULL, NULL);
errcode = clEnqueueWriteBuffer(clCommandQue, b_mem_obj, CL_TRUE, 0, sizeof(DATA_TYPE) * NI * NJ * NK, B, 0, NULL, NULL);
if(errcode != CL_SUCCESS)printf("Error in writing buffers\n");
}
/* Build the OpenCL program from the previously loaded source and create the
 * "Convolution3D_kernel" kernel object. */
void cl_load_prog()
{
// Create a program from the kernel source
clProgram = clCreateProgramWithSource(clGPUContext, 1, (const char **)&source_str, (const size_t *)&source_size, &errcode);
if(errcode != CL_SUCCESS) printf("Error in creating program\n");
// Build the program
errcode = clBuildProgram(clProgram, 1, &device_id, NULL, NULL, NULL);
if(errcode != CL_SUCCESS) printf("Error in building program\n");
// Create the OpenCL kernel
clKernel = clCreateKernel(clProgram, "Convolution3D_kernel", &errcode);
if(errcode != CL_SUCCESS) printf("Error in creating kernel\n");
clFinish(clCommandQue);
}
/* Hybrid CPU/GPU launch: slices [1, gpu_ni) run on the GPU (one 2D NDRange
 * per i-slice), slices [gpu_ni, NI-1) run on the CPU via Convolution3D_omp.
 * The split is controlled by the global cpu_offset (percentage on the CPU).
 * Accumulates wall time into total_time and returns gpu_ni, the number of
 * leading slices produced on the GPU (used to size the read-back). */
int cl_launch_kernel(DATA_TYPE* A, DATA_TYPE* B) {
  double t_start, t_end;
  int ni = NI;
  int nj = NJ;
  int nk = NK;
  size_t localWorkSize[2], globalWorkSize[2];
  localWorkSize[0] = DIM_LOCAL_WORK_GROUP_X;
  localWorkSize[1] = DIM_LOCAL_WORK_GROUP_Y;
  globalWorkSize[0] = (size_t)ceil(((float)NK) / ((float)DIM_LOCAL_WORK_GROUP_X)) * DIM_LOCAL_WORK_GROUP_X;
  globalWorkSize[1] = (size_t)ceil(((float)NJ) / ((float)DIM_LOCAL_WORK_GROUP_Y)) * DIM_LOCAL_WORK_GROUP_Y;
  bool cpu_run = false, gpu_run = false;
  int cpu_ni = cpu_offset * NI / 100;
  int gpu_ni = NI - cpu_ni - 1;
  if (cpu_ni > 0) {
    cpu_run = true;
  }
  if (gpu_ni > 0) {
    gpu_run = true;
  }
  cl_event eventList;
  errcode = clFlush(clCommandQue);
  errcode = clFinish(clCommandQue);
  t_start = rtclock();
  if (gpu_run) {
    errcode = clSetKernelArg(clKernel, 0, sizeof(cl_mem), (void *)&a_mem_obj);
    errcode |= clSetKernelArg(clKernel, 1, sizeof(cl_mem), (void *)&b_mem_obj);
    errcode |= clSetKernelArg(clKernel, 2, sizeof(int), &ni);
    errcode |= clSetKernelArg(clKernel, 3, sizeof(int), &nj);
    errcode |= clSetKernelArg(clKernel, 4, sizeof(int), &nk);
    /* BUGFIX: this message used to be printed twice by duplicated checks */
    if (errcode != CL_SUCCESS)
      printf("Error in seting arguments\n");
    for (int i = 1; i < gpu_ni; ++i) {
      /* BUGFIX: accumulate errors with |= instead of overwriting errcode,
       * so a failed clSetKernelArg/enqueue is not silently lost */
      errcode |= clSetKernelArg(clKernel, 5, sizeof(int), &i);
      errcode |= clEnqueueNDRangeKernel(clCommandQue, clKernel, 2, NULL, globalWorkSize, localWorkSize, 0, NULL, &eventList);
    }
    if (errcode != CL_SUCCESS)
      printf("Error in launching kernel\n");
  }
  if (cpu_run) {
    gpu_ni = gpu_ni < 1 ? 1 : gpu_ni;
    for (int i = gpu_ni; i < NI - 1; ++i) {
      Convolution3D_omp(A, B, ni, nj, nk, i);
    }
  }
  clFlush(clCommandQue);
  clFinish(clCommandQue);
  t_end = rtclock();
  total_time += 1000.0 * (t_end - t_start);
  return gpu_ni;
}
/* Release all OpenCL objects. NOTE(review): errcode is overwritten by each
 * call, so only the status of the final clReleaseContext is actually checked. */
void cl_clean_up()
{
// Clean up
errcode = clFlush(clCommandQue);
errcode = clFinish(clCommandQue);
errcode = clReleaseKernel(clKernel);
errcode = clReleaseProgram(clProgram);
errcode = clReleaseMemObject(a_mem_obj);
errcode = clReleaseMemObject(b_mem_obj);
errcode = clReleaseCommandQueue(clCommandQue);
errcode = clReleaseContext(clGPUContext);
if(errcode != CL_SUCCESS) printf("Error in cleanup\n");
}
/* Compare the CPU reference (B) against the hybrid result (B_outputFromGpu)
 * over an interior region, counting entries whose percent difference exceeds
 * the threshold; prints the index of the first mismatch and the total count. */
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
  int fail = 0;
  for (int i = 3; i < NI - 3; ++i)
    for (int j = 2; j < NJ - 1; ++j)
      for (int k = 2; k < NK - 1; ++k)
      {
        const int idx = i * (NK * NJ) + j * NK + k;
        if (percentDiff(B[idx], B_outputFromGpu[idx]) > PERCENT_DIFF_ERROR_THRESHOLD)
        {
          fail++;
          if (fail == 1)
            printf("i, j, k: %d, %d, %d\n", i, j, k);
        }
      }
  // Print results
  printf("Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
/* Sequential reference 3D convolution over the full volume; used to validate
 * the hybrid CPU/GPU result in compareResults.
 * NOTE(review): several taps reuse the (j-1)/(k-1) offsets (e.g. the c21 and
 * c31 terms repeat the c11 index); this mirrors the OpenMP path and the GPU
 * kernel, so it is intentionally left untouched — confirm against upstream
 * PolyBench/GPU if exact stencil semantics matter. */
void conv3D(DATA_TYPE* A, DATA_TYPE* B)
{
int i, j, k;
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
// 3x3 stencil coefficients (identical to Convolution3D_omp and the kernel)
c11 = +2; c21 = +5; c31 = -8;
c12 = -3; c22 = +6; c32 = -9;
c13 = +4; c23 = +7; c33 = +10;
for (i = 1; i < NI - 1; ++i) // 0
{
for (j = 1; j < NJ - 1; ++j) // 1
{
for (k = 1; k < NK -1; ++k) // 2
{
//printf("i:%d\nj:%d\nk:%d\n", i, j, k);
B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)] + c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
+ c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)] + c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
+ c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)] + c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
+ c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)] + c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
+ c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)] + c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
+ c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
}
}
}
}
/* Driver: runs the hybrid CPU/GPU 3D convolution and validates it against the
 * sequential reference. argv[1] = percentage of slices computed on the CPU. */
int main(int argc, char* argv[]) {
if (argc != 2) {
printf("usage: 3D <number of cpu offset (0~100)>\n");
exit(0);
}
cpu_offset = atoi(argv[1]);
printf("CPU offset: %d\n", cpu_offset);
double t_start, t_end; // NOTE(review): unused here; timing happens in cl_launch_kernel
DATA_TYPE* A;
DATA_TYPE* B;
DATA_TYPE* B_outputFromGpu;
A = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
B = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
int i; // NOTE(review): unused
init(A); // deterministic input pattern
read_cl_file();
cl_initialization();
cl_mem_init(A, B);
cl_load_prog();
// Hybrid run: first gpu_ni slices on the GPU, remainder on the CPU (OpenMP)
int gpu_ni = cl_launch_kernel(A, B_outputFromGpu);
printf("Total time: %lf ms\n", total_time);
if (cpu_offset < 100){
// Read back only the leading slices the GPU actually produced
errcode = clEnqueueReadBuffer(clCommandQue, b_mem_obj, CL_TRUE, 0, gpu_ni * NJ * NK * sizeof(DATA_TYPE), B_outputFromGpu, 0, NULL, NULL);
if(errcode != CL_SUCCESS) printf("Error in reading GPU mem\n");
}
conv3D(A, B); // sequential reference result
compareResults(B, B_outputFromGpu);
cl_clean_up();
// NOTE(review): source_str (malloc'd in read_cl_file) is never freed; harmless at exit
free(A);
free(B);
free(B_outputFromGpu);
return 0;
}
|
blake2bp-ref.c | /*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 4
/* Initialize one leaf state of the blake2bp tree.
 * offset selects the leaf's lane (node_offset in the parameter block). */
static inline int blake2bp_init_leaf( blake2b_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
  blake2b_param P[1];

  /* Start from an all-zero parameter block (covers leaf_length, node_depth,
     reserved, salt and personal), then fill in the non-zero fields. */
  memset( P, 0, sizeof( P ) );
  P->digest_length = outlen;
  P->key_length    = keylen;
  P->fanout        = PARALLELISM_DEGREE;
  P->depth         = 2;
  store64( &P->node_offset, offset );
  P->inner_length  = BLAKE2B_OUTBYTES;
  return blake2b_init_param( S, P );
}
/* Initialize the root state of the blake2bp tree (node_depth = 1,
 * node_offset = 0). */
static inline int blake2bp_init_root( blake2b_state *S, uint8_t outlen, uint8_t keylen )
{
  blake2b_param P[1];

  /* All-zero parameter block first (leaf_length, node_offset, reserved,
     salt, personal), then the non-zero fields. */
  memset( P, 0, sizeof( P ) );
  P->digest_length = outlen;
  P->key_length    = keylen;
  P->fanout        = PARALLELISM_DEGREE;
  P->depth         = 2;
  P->node_depth    = 1;
  P->inner_length  = BLAKE2B_OUTBYTES;
  return blake2b_init_param( S, P );
}
/* Incremental blake2bp: set up the root state and the PARALLELISM_DEGREE
 * leaf states for an unkeyed hash of outlen bytes. Returns 0 on success. */
int blake2bp_init( blake2bp_state *S, const uint8_t outlen )
{
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;

  /* empty staging buffer */
  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;

  if( blake2bp_init_root( S->R, outlen, 0 ) < 0 ) return -1;

  /* one leaf per lane, node_offset = lane index */
  for( size_t lane = 0; lane < PARALLELISM_DEGREE; ++lane )
    if( blake2bp_init_leaf( S->S[lane], outlen, 0, lane ) < 0 ) return -1;

  /* last-node flags: the root always is; only the final leaf is */
  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}
/* Keyed incremental blake2bp: like blake2bp_init, but every leaf additionally
 * absorbs one zero-padded block containing the key. Returns 0 on success. */
int blake2bp_init_key( blake2bp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen )
{
if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
if( !key || !keylen || keylen > BLAKE2B_KEYBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
if( blake2bp_init_root( S->R, outlen, keylen ) < 0 )
return -1;
/* one leaf per lane, node_offset = lane index */
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2bp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
{
uint8_t block[BLAKE2B_BLOCKBYTES];
memset( block, 0, BLAKE2B_BLOCKBYTES );
memcpy( block, key, keylen );
/* every leaf absorbs the same zero-padded key block first */
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S->S[i], block, BLAKE2B_BLOCKBYTES );
secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
}
return 0;
}
/* Absorb inlen bytes. The input is consumed in stripes of PARALLELISM_DEGREE
 * blocks: block b of a stripe goes to leaf b. A partial stripe is staged in
 * S->buf until a later call (or blake2bp_final) completes it. */
int blake2bp_update( blake2bp_state *S, const uint8_t *in, uint64_t inlen )
{
size_t left = S->buflen;
size_t fill = sizeof( S->buf ) - left;
/* If the staging buffer can be completed, top it up and flush one full
   stripe (one block per leaf). */
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2b_update( S->S[i], S->buf + i * BLAKE2B_BLOCKBYTES, BLAKE2B_BLOCKBYTES );
in += fill;
inlen -= fill;
left = 0;
}
/* One lane per leaf: lane id__ consumes blocks id__, id__+PARALLELISM_DEGREE,
   ... of the remaining input (OpenMP threads if available, else a loop). */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
{
#if defined(_OPENMP)
size_t id__ = omp_get_thread_num();
#endif
uint64_t inlen__ = inlen;
const uint8_t *in__ = ( const uint8_t * )in;
in__ += id__ * BLAKE2B_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
{
blake2b_update( S->S[id__], in__, BLAKE2B_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
}
}
/* Stash the trailing partial stripe for the next update/final call. */
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
/* Finalize: flush the staged partial stripe into the leaves, finalize every
 * leaf, then hash the concatenated leaf digests with the root node. */
int blake2bp_final( blake2bp_state *S, uint8_t *out, const uint8_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];

  for( size_t lane = 0; lane < PARALLELISM_DEGREE; ++lane )
  {
    const size_t off = lane * BLAKE2B_BLOCKBYTES;
    /* this lane may still own (up to) one staged block */
    if( S->buflen > off )
    {
      size_t left = S->buflen - off;
      if( left > BLAKE2B_BLOCKBYTES ) left = BLAKE2B_BLOCKBYTES;
      blake2b_update( S->S[lane], S->buf + off, left );
    }
    blake2b_final( S->S[lane], hash[lane], BLAKE2B_OUTBYTES );
  }

  /* root absorbs the leaf digests in lane order */
  for( size_t lane = 0; lane < PARALLELISM_DEGREE; ++lane )
    blake2b_update( S->R, hash[lane], BLAKE2B_OUTBYTES );

  return blake2b_final( S->R, out, outlen );
}
/* One-shot blake2bp: hash inlen bytes of in (optionally keyed) into out.
 * Four leaves hash interleaved blocks of the input (in parallel when OpenMP
 * is available); the root then hashes the concatenated leaf digests.
 * Returns 0 on success, -1 on invalid parameters. */
int blake2bp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2B_OUTBYTES];
  blake2b_state S[PARALLELISM_DEGREE][1];
  blake2b_state FS[1];

  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;
  if ( NULL == out ) return -1;
  if( NULL == key && keylen > 0 ) return -1;
  if( !outlen || outlen > BLAKE2B_OUTBYTES ) return -1;
  if( keylen > BLAKE2B_KEYBYTES ) return -1;

  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2bp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;

  S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node

  if( keylen > 0 )
  {
    uint8_t block[BLAKE2B_BLOCKBYTES];
    memset( block, 0, BLAKE2B_BLOCKBYTES );
    memcpy( block, key, keylen );

    /* every leaf absorbs the same zero-padded key block first */
    for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2b_update( S[i], block, BLAKE2B_BLOCKBYTES );

    secure_zero_memory( block, BLAKE2B_BLOCKBYTES ); /* Burn the key from stack */
  }

  /* Lane id__ hashes blocks id__, id__+PARALLELISM_DEGREE, ... of the input */
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
  {
#if defined(_OPENMP)
    size_t id__ = omp_get_thread_num();
#endif
    uint64_t inlen__ = inlen;
    const uint8_t *in__ = ( const uint8_t * )in;
    in__ += id__ * BLAKE2B_BLOCKBYTES;

    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES )
    {
      blake2b_update( S[id__], in__, BLAKE2B_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2B_BLOCKBYTES;
    }

    /* tail: this lane may own one final (possibly partial) block */
    if( inlen__ > id__ * BLAKE2B_BLOCKBYTES )
    {
      const size_t left = inlen__ - id__ * BLAKE2B_BLOCKBYTES;
      const size_t len = left <= BLAKE2B_BLOCKBYTES ? left : BLAKE2B_BLOCKBYTES;
      blake2b_update( S[id__], in__, len );
    }

    blake2b_final( S[id__], hash[id__], BLAKE2B_OUTBYTES );
  }

  if( blake2bp_init_root( FS, outlen, keylen ) < 0 )
    return -1;

  FS->last_node = 1; // Mark as last node

  /* root absorbs the leaf digests in lane order */
  for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2b_update( FS, hash[i], BLAKE2B_OUTBYTES );

  return blake2b_final( FS, out, outlen ); /* fixed: dropped stray ';;' */
}
#if defined(BLAKE2BP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Known-answer self-test: hashes every prefix length of a deterministic
 * buffer with a deterministic key and compares against blake2bp_keyed_kat. */
int main( int argc, char **argv )
{
uint8_t key[BLAKE2B_KEYBYTES];
uint8_t buf[KAT_LENGTH];
/* key and message bytes are simply 0, 1, 2, ... */
for( size_t i = 0; i < BLAKE2B_KEYBYTES; ++i )
key[i] = ( uint8_t )i;
for( size_t i = 0; i < KAT_LENGTH; ++i )
buf[i] = ( uint8_t )i;
/* one keyed hash per prefix length i */
for( size_t i = 0; i < KAT_LENGTH; ++i )
{
uint8_t hash[BLAKE2B_OUTBYTES];
blake2bp( hash, buf, key, BLAKE2B_OUTBYTES, i, BLAKE2B_KEYBYTES );
if( 0 != memcmp( hash, blake2bp_keyed_kat[i], BLAKE2B_OUTBYTES ) )
{
puts( "error" );
return -1;
}
}
puts( "ok" );
return 0;
}
#endif
|
GB_unop__identity_fc64_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fc64_fp32)
// op(A') function: GB (_unop_tran__identity_fc64_fp32)
// C type: GxB_FC64_t
// A type: float
// cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0)
// unaryop: cij = aij
#define GB_ATYPE \
float
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (GxB_FC64_t) Ax [p]: cast each float entry to double complex with
// zero imaginary part, in parallel over nthreads. (Generated code: the loop
// bodies are the expansion of the GB_CAST/GB_OP macros defined above.)
GrB_Info GB (_unop_apply__identity_fc64_fp32)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// dense case: every one of the anz entries is present
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ; // skip entries absent from the bitmap
float aij = Ax [p] ;
GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose with typecast. The actual work is performed
// by the shared template GB_unop_transpose.c, specialized here through the
// GB_ATYPE/GB_CTYPE/GB_CAST_OP macros defined above.
GrB_Info GB (_unop_tran__identity_fc64_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__ainv_bool_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_bool_uint32
// op(A') function: GB_tran__ainv_bool_uint32
// C type: bool
// A type: uint32_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_BOOL || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (bool) Ax [p] for all p, in parallel. The generated AINV op on
// bool is the identity (the GB_OP macro above is `z = x`), so this reduces
// to a parallel cast-and-copy.
GrB_Info GB_unop__ainv_bool_uint32
(
bool *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ; // Cx [p] = (bool) Ax [p]
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose with typecast. The work is done by the shared
// template GB_unaryop_transpose.c (phase 2), specialized via the macros above.
GrB_Info GB_tran__ainv_bool_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.