/* ===== MiniBatch.h ===== */
//
// smarties
// Copyright (c) 2018 CSE-Lab, ETH Zurich, Switzerland. All rights reserved.
// Distributed under the terms of the MIT license.
//
// Created by Guido Novati (novatig@ethz.ch).
//
#ifndef smarties_MiniBatch_h
#define smarties_MiniBatch_h
#include "Episode.h"
namespace smarties
{
struct MiniBatch
{
const Uint size;
MiniBatch(const Uint _size) : size(_size)
{
episodes.resize(size);
begTimeStep.resize(size);
endTimeStep.resize(size);
sampledTimeStep.resize(size);
S.resize(size); R.resize(size); PERW.resize(size);
}
MiniBatch(MiniBatch && p) = default;
MiniBatch& operator=(MiniBatch && p) = delete;
MiniBatch(const MiniBatch &p) = delete;
MiniBatch& operator=(const MiniBatch &p) = delete;
std::vector<Episode*> episodes;
std::vector<Sint> begTimeStep;
std::vector<Sint> endTimeStep;
std::vector<Sint> sampledTimeStep;
Sint sampledBegStep(const Uint b) const { return begTimeStep[b]; }
Sint sampledEndStep(const Uint b) const { return endTimeStep[b]; }
Sint sampledTstep(const Uint b) const { return sampledTimeStep[b]; }
Sint sampledNumSteps(const Uint b) const {
assert(begTimeStep.size() > b);
assert(endTimeStep.size() > b);
return endTimeStep[b] - begTimeStep[b];
}
Sint mapTime2Ind(const Uint b, const Sint t) const
{
assert(begTimeStep.size() > b and begTimeStep[b] <= t);
// maps a time step t along the trajectory to an index into this batch's allocated buffers
return t - begTimeStep[b];
}
Sint mapInd2Time(const Uint b, const Sint k) const
{
assert(begTimeStep.size() > b);
// maps an index k into this batch's allocated buffers back to a time step along the trajectory
return k + begTimeStep[b];
}
// episodes | time steps | dimensionality
std::vector< std::vector< NNvec > > S; // scaled state
std::vector< std::vector< Real > > R; // scaled reward
std::vector< std::vector< nnReal> > PERW; // prioritized sampling
Episode& getEpisode(const Uint b) const
{
return * episodes[b];
}
NNvec& state(const Uint b, const Sint t)
{
return S[b][mapTime2Ind(b, t)];
}
const NNvec& state(const Uint b, const Sint t) const
{
return S[b][mapTime2Ind(b, t)];
}
Real& reward(const Uint b, const Sint t)
{
return R[b][mapTime2Ind(b, t)];
}
const Real& reward(const Uint b, const Sint t) const
{
return R[b][mapTime2Ind(b, t)];
}
nnReal& PERweight(const Uint b, const Sint t)
{
return PERW[b][mapTime2Ind(b, t)];
}
const nnReal& PERweight(const Uint b, const Sint t) const
{
return PERW[b][mapTime2Ind(b, t)];
}
const Rvec& action(const Uint b, const Uint t) const
{
return episodes[b]->actions[t];
}
const Rvec& mu(const Uint b, const Uint t) const
{
return episodes[b]->policies[t];
}
nnReal& returnEstimate(const Uint b, const Uint t) const
{
return episodes[b]->returnEstimator[t];
}
std::vector<nnReal> returnEstimates(const Uint dt = 0) const
{
std::vector<nnReal> ret(size, 0);
for(Uint b=0; b<size; ++b) {
const auto t = sampledTstep(b);
assert(t >= (Sint) dt);
ret[b] = episodes[b]->returnEstimator[t-dt];
}
return ret;
}
nnReal& value(const Uint b, const Uint t) const
{
return episodes[b]->stateValue[t];
}
nnReal& advantage(const Uint b, const Uint t) const
{
return episodes[b]->actionAdvantage[t];
}
bool isTerminal(const Uint b, const Uint t) const
{
return episodes[b]->isTerminal(t);
}
std::vector<int> isNextTerminal() const // pybind will not like vector of bool
{
std::vector<int> ret(size, 0);
for(Uint b=0; b<size; ++b)
ret[b] = episodes[b]->isTerminal(sampledTstep(b) + 1);
return ret;
}
bool isTruncated(const Uint b, const Uint t) const
{
return episodes[b]->isTruncated(t);
}
std::vector<int> isNextTruncated() const // pybind will not like vector of bool
{
std::vector<int> ret(size, 0);
for(Uint b=0; b<size; ++b)
ret[b] = episodes[b]->isTruncated(sampledTstep(b)+1);
return ret;
}
Uint nTimeSteps(const Uint b) const
{
return episodes[b]->nsteps();
}
Uint nDataSteps(const Uint b) const // the terminal/truncated state is not actual data
{
return episodes[b]->ndata();
}
Uint indCurrStep(const Uint b=0) const
{
assert(episodes[b]->nsteps() > 0);
return episodes[b]->nsteps() - 1;
}
void setMseDklImpw(const Uint b, const Uint t, // batch id and time id
const Fval E, const Fval D, const Fval W, // error, dkl, offpol weight
const Fval C, const Fval invC) const // bounds of offpol weight
{
getEpisode(b).updateCumulative_atomic(t, E, D, W, C, invC);
}
void setValues(const Uint b, const Uint t, const Fval V) const
{
return setValues(b, t, V, V);
}
void setValues(const Uint b, const Uint t, const Fval V, const Fval Q) const
{
getEpisode(b).updateValues_atomic(t, V, Q);
}
void appendValues(const Fval V) const
{
return appendValues(V, V);
}
void appendValues(const Fval V, const Fval Q) const
{
getEpisode(0).stateValue.push_back(V);
getEpisode(0).actionAdvantage.push_back(Q-V);
assert(getEpisode(0).nsteps() == getEpisode(0).actionAdvantage.size());
assert(getEpisode(0).nsteps() == getEpisode(0).stateValue.size());
assert(size == 1 && "This should only be called by in-progress episodes");
}
template<typename T>
void setAllValues(const T& Vs, const T& Qs) const
{
assert(Vs.size() == size and Qs.size() == size);
#pragma omp parallel for schedule(static)
for(Uint b=0; b<size; ++b) setValues(b, sampledTstep(b), Vs[b], Qs[b]);
}
template<typename T>
void updateAllLastStepValues(const std::vector<T>& values) const
{
assert(values.size() == size);
#pragma omp parallel for schedule(static)
for(Uint b=0; b<size; ++b) {
const auto t = sampledTstep(b);
if( isTruncated(b, t+1) ) setValues(b, t+1, values[b]);
else if( isTerminal (b, t+1) ) setValues(b, t+1, 0);
}
}
template<typename T>
void setAllMseDklImpw(const std::vector<T>& deltaVal,
const std::vector<T>& DKLs,
const std::vector<T>& rhos,
const Fval C, const Fval invC) const
{
assert(deltaVal.size() ==size && DKLs.size() ==size && rhos.size() ==size);
for(Uint b=0; b<size; ++b)
setMseDklImpw(b, sampledTstep(b), deltaVal[b], DKLs[b], rhos[b], C,invC);
}
void resizeStep(const Uint b, const Uint nSteps)
{
assert( S.size()>b and R.size()>b);
S[b].resize(nSteps); R[b].resize(nSteps); PERW[b].resize(nSteps);
}
};
} // namespace smarties
#endif // smarties_MiniBatch_h
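//
// Usage sketch (not part of the original header): illustrates the
// time-step <-> buffer-index mapping above. Assumes a MiniBatch `mb`
// whose slot b was filled by a sampler with begTimeStep[b] = 10 and
// endTimeStep[b] = 16, i.e. six stored steps of some episode:
//
//   mb.resizeStep(b, 6);            // S[b], R[b], PERW[b] now hold 6 steps
//   Sint k = mb.mapTime2Ind(b, 12); // k == 2: the third stored step
//   Sint t = mb.mapInd2Time(b, k);  // t == 12: the round trip is an identity
//   NNvec& s = mb.state(b, 12);     // same element as mb.S[b][2]
//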
/* ===== CT_OMP_TARGET_IMPL.c ===== */
/*
* _CT_OMP_TARGET_IMPL_C
*
* Copyright (C) 2017-2021 Tactical Computing Laboratories, LLC
* All Rights Reserved
* contact@tactcomplabs.com
*
* See LICENSE in the top level directory for licensing details
*/
#include <omp.h>
#include <stdint.h>
/* OpenMP Target Benchmark Implementations
*
* Benchmark implementations are in the form:
*
* void BENCHTYPE_ATOMTYPE( uint64_t *ARRAY, uint64_t *IDX,
* uint64_t iters,
* uint64_t pes )
*
* The STRIDEN variants take an additional uint64_t stride argument.
*
*/
void RAND_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_fetch_add( &ARRAY[IDX[i]], (uint64_t)(0x1), __ATOMIC_RELAXED );
}
}
}
}
void RAND_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_compare_exchange_n( &ARRAY[IDX[i]], &ARRAY[IDX[i]], ARRAY[IDX[i]],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
void STRIDE1_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_fetch_add( &ARRAY[i], (uint64_t)(0xF), __ATOMIC_RELAXED );
}
}
}
}
void STRIDE1_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_compare_exchange_n( &ARRAY[i], &ARRAY[i], ARRAY[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
void STRIDEN_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes,
uint64_t stride ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes, stride)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads) * stride) );
for( i=start; i<(start+iters_per_thread); i+=stride ){
__atomic_fetch_add( &ARRAY[i], (uint64_t)(0xF), __ATOMIC_RELAXED );
}
}
}
}
void STRIDEN_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes,
uint64_t stride ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes, stride)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads) * stride) );
for( i=start; i<(start+iters_per_thread); i+=stride ){
__atomic_compare_exchange_n( &ARRAY[i], &ARRAY[i], ARRAY[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
void PTRCHASE_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
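// Pointer chase: an atomic fetch-add of 0 is a pure atomic load of
// IDX[start]; the loaded value becomes the next index, serializing
// the chain of dependent loads.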
for( i=0; i<iters_per_thread; i++ ){
start = __atomic_fetch_add( &IDX[start],
(uint64_t)(0x00ull),
__ATOMIC_RELAXED );
}
}
}
}
void PTRCHASE_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=0; i<iters_per_thread; i++ ){
__atomic_compare_exchange_n( &IDX[start], &start, IDX[start],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
void SG_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
uint64_t src = 0;
uint64_t dest = 0;
uint64_t val = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
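// Scatter/gather step: atomically load the src and dest indices from
// IDX, fetch-and-increment ARRAY[src], then add the loaded value
// into ARRAY[dest].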
for( i=start; i<(start+iters_per_thread); i++ ){
src = __atomic_fetch_add( &IDX[i], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
dest = __atomic_fetch_add( &IDX[i+1], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
val = __atomic_fetch_add( &ARRAY[src], (uint64_t)(0x01ull), __ATOMIC_RELAXED );
__atomic_fetch_add( &ARRAY[dest], val, __ATOMIC_RELAXED );
}
}
}
}
void SG_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
uint64_t src = 0;
uint64_t dest = 0;
uint64_t val = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_compare_exchange_n( &IDX[i], &src, IDX[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &IDX[i+1], &dest, IDX[i+1],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[src], &val, ARRAY[src],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[dest], &ARRAY[dest], val,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
void CENTRAL_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
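// Central contention: every thread of every team atomically
// increments the single element ARRAY[0].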
for( i=0; i<iters_per_thread; i++ ){
__atomic_fetch_add( &ARRAY[0], (uint64_t)(0x1), __ATOMIC_RELAXED );
}
}
}
}
void CENTRAL_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
for( i=0; i<iters_per_thread; i++ ){
__atomic_compare_exchange_n( &ARRAY[0], &ARRAY[0], ARRAY[0],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
void SCATTER_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
uint64_t dest = 0;
uint64_t val = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
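// Scatter step: load a destination index from IDX, fetch-and-increment
// the local element ARRAY[i], then add the loaded value into ARRAY[dest].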
for( i=start; i<(start+iters_per_thread); i++ ){
dest = __atomic_fetch_add( &IDX[i+1], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
val = __atomic_fetch_add( &ARRAY[i], (uint64_t)(0x01ull), __ATOMIC_RELAXED );
__atomic_fetch_add( &ARRAY[dest], val, __ATOMIC_RELAXED );
}
}
}
}
void SCATTER_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
uint64_t dest = 0;
uint64_t val = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_compare_exchange_n( &IDX[i+1], &dest, IDX[i+1],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[i], &val, ARRAY[i],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[dest], &ARRAY[dest], val,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
void GATHER_ADD( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
uint64_t dest = 0;
uint64_t val = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
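// Gather step: load an index from IDX, fetch-and-increment ARRAY at that
// index, then add the loaded value into the local element ARRAY[i].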
for( i=start; i<(start+iters_per_thread); i++ ){
dest = __atomic_fetch_add( &IDX[i+1], (uint64_t)(0x00ull), __ATOMIC_RELAXED );
val = __atomic_fetch_add( &ARRAY[dest], (uint64_t)(0x01ull), __ATOMIC_RELAXED );
__atomic_fetch_add( &ARRAY[i], val, __ATOMIC_RELAXED );
}
}
}
}
void GATHER_CAS( uint64_t *restrict ARRAY,
uint64_t *restrict IDX,
uint64_t iters,
uint64_t pes ){
#pragma omp target teams num_teams(pes) is_device_ptr(ARRAY, IDX) map(to:iters, pes)
{
#pragma omp parallel
{
uint64_t i = 0;
uint64_t dest = 0;
uint64_t val = 0;
// Divide iters across number of threads per team & set start
uint64_t num_threads = (uint64_t) omp_get_num_threads();
uint64_t iters_per_thread = (omp_get_thread_num() == num_threads - 1) ?
(iters / num_threads) + (iters % num_threads) :
(iters / num_threads);
uint64_t start = (uint64_t) ( (omp_get_team_num() * iters) +
(omp_get_thread_num() * (iters/num_threads)) );
for( i=start; i<(start+iters_per_thread); i++ ){
__atomic_compare_exchange_n( &IDX[i+1], &dest, IDX[i+1],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[dest], &val, ARRAY[dest],
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
__atomic_compare_exchange_n( &ARRAY[i], &ARRAY[i], val,
0, __ATOMIC_RELAXED, __ATOMIC_RELAXED );
}
}
}
}
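/* --- Example host-side driver (a sketch, not part of the original
 * benchmark): shows one plausible way to launch RAND_ADD. The sizing and
 * initialization policy here are assumptions, as is the guard macro
 * CT_EXAMPLE_DRIVER; the real harness may differ. Since the kernels take
 * raw device pointers (is_device_ptr), both buffers are allocated with
 * omp_target_alloc and staged with omp_target_memcpy. */
#ifdef CT_EXAMPLE_DRIVER
#include <stdlib.h>
int main( void ){
  const uint64_t pes    = 4;              /* teams */
  const uint64_t iters  = 1024;           /* iterations per team */
  const size_t   nelems = (size_t)(pes * iters);
  const int dev  = omp_get_default_device();
  const int host = omp_get_initial_device();
  uint64_t *ARRAY = (uint64_t *) omp_target_alloc( nelems*sizeof(uint64_t), dev );
  uint64_t *IDX   = (uint64_t *) omp_target_alloc( nelems*sizeof(uint64_t), dev );
  /* stage zeroed data and random indices in [0, nelems) on the host */
  uint64_t *h_buf = (uint64_t *) calloc( nelems, sizeof(uint64_t) );
  omp_target_memcpy( ARRAY, h_buf, nelems*sizeof(uint64_t), 0, 0, dev, host );
  for( size_t i = 0; i < nelems; i++ )
    h_buf[i] = (uint64_t)rand() % (uint64_t)nelems;
  omp_target_memcpy( IDX, h_buf, nelems*sizeof(uint64_t), 0, 0, dev, host );
  RAND_ADD( ARRAY, IDX, iters, pes );     /* timing would wrap this call */
  free( h_buf );
  omp_target_free( ARRAY, dev );
  omp_target_free( IDX, dev );
  return 0;
}
#endif /* CT_EXAMPLE_DRIVER */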
/* EOF */
/* ===== mbar_log_wi_jn.c ===== */
#include "mex.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
/* inputs */
double *N_k_pr;
size_t *N_k;
double *f_k;
double *u_kln;
double *u_kn;
double *K_pr;
size_t K;
double *N_max_pr;
size_t N_max;
/* outputs */
double *log_wi_jn;
/* working variables */
size_t k, l;
size_t n;
double *FlogN;
double *log_term;
double term_sum;
double max_log_term;
double log_sum;
double u_l, u_k;
/* check inputs and outputs */
if ( nrhs < 6 ) {
mexErrMsgTxt("MEX: Not enough input arguments.");
}
/* get inputs */
N_k_pr = mxGetPr(prhs[0]);
f_k = mxGetPr(prhs[1]);
u_kln = mxGetPr(prhs[2]);
u_kn = mxGetPr(prhs[3]);
K_pr = mxGetPr(prhs[4]);
N_max_pr = mxGetPr(prhs[5]);
K = (size_t) (K_pr[0] + 0.5);
N_max = (size_t) (N_max_pr[0] + 0.5);
N_k = (size_t *) malloc(K*sizeof(size_t));
for (k = 0; k < K; k++) {
N_k[k] = (size_t) (N_k_pr[k] + 0.5);
}
#ifdef DEBUG
mexPrintf("MEX: K = %zu\n", K);
mexPrintf("MEX: N_max = %zu\n", N_max);
#endif
/* setup: working variables */
FlogN = (double *) malloc(K*sizeof(double));
for (k = 0; k < K; k++) {
FlogN[k] = log((double)N_k[k])+f_k[k];
}
/* allocate output variables */
plhs[0] = mxCreateDoubleMatrix((mwSize) K, (mwSize) N_max, mxREAL);
log_wi_jn = mxGetPr(plhs[0]);
for (k = 0; k < K; k++) {
for (n = 0; n < N_max; n++) {
log_wi_jn[k + n*K] = 0.0;
}
}
/* calculation */
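/* The parallel loop below computes, for sample n drawn from state k,
 *   log w[k,n] = -log( sum_l N_l * exp( f_l - (u_l - u_k) ) )
 * where FlogN[l] = log(N_l) + f_l, using the max-shift (log-sum-exp)
 * trick for numerical stability. */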
#pragma omp parallel \
default(none) \
private(k, n, l, max_log_term, u_k, u_l, log_term, term_sum, log_sum) \
shared(K, N_k, u_kln, u_kn, FlogN, log_wi_jn)
{
log_term = (double *) malloc(K*sizeof(double));
#pragma omp for
for (k = 0; k < K; k++) {
for (n = 0; n < N_k[k]; n++) {
max_log_term = -1e100;
u_k = u_kn[k + n*K];
for (l = 0; l < K; l++) {
u_l = u_kln[k + l*K + n*K*K];
log_term[l] = FlogN[l] - (u_l - u_k);
if (log_term[l] > max_log_term) {max_log_term = log_term[l];}
}
term_sum = 0.0;
for (l = 0; l < K; l++) {
term_sum += exp(log_term[l]-max_log_term);
}
log_sum = log(term_sum) + max_log_term;
log_wi_jn[k + n*K] = -log_sum;
}
}
free(log_term);
}
if (N_k != NULL) {
free(N_k);
}
if (FlogN != NULL) {
free(FlogN);
}
/* exit(EXIT_SUCCESS); */
}
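/* --- Numerical note (illustration only, not part of the original MEX
 * file): the loop above evaluates log(sum_l exp(x_l)) with the max-shift
 * trick, which avoids overflow/underflow when the terms span many orders
 * of magnitude. The same pattern in isolation: */
#if 0
static double log_sum_exp( const double *x, size_t n ){
  double m = x[0];                                  /* max_log_term */
  for( size_t l = 1; l < n; l++ ) if( x[l] > m ) m = x[l];
  double s = 0.0;                                   /* term_sum */
  for( size_t l = 0; l < n; l++ ) s += exp( x[l] - m );
  return log( s ) + m;                              /* log_sum */
}
#endif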
/* ===== testing.c ===== */
/* Generated by Cython 0.29.21 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [
"C:\\Users\\yoann\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\hsv\\hsv_c.c"
],
"extra_compile_args": [
"/Qpar",
"/fp:fast",
"/O2",
"/Oy",
"/Ot"
],
"include_dirs": [
"C:\\Users\\yoann\\AppData\\Local\\Programs\\Python\\Python36\\lib\\site-packages\\hsv",
"."
],
"language": "c",
"name": "HSV.testing",
"sources": [
"testing.pyx"
]
},
"module_name": "HSV.testing"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_21"
#define CYTHON_HEX_VERSION 0x001D15F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__HSV__testing
#define __PYX_HAVE_API__HSV__testing
/* Early includes */
#include <math.h>
#include <string.h>
#include <stdio.h>
#include "hsv_c.c"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
static const char *__pyx_f[] = {
"testing.pyx",
};
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/*--- Type declarations ---*/
/* "hsv.pxd":25
* rgb struct_hsv_to_rgb(double h, double s, double v)nogil
*
* ctypedef hsv HSV_ # <<<<<<<<<<<<<<
* ctypedef rgb RGB_
*
*/
typedef struct hsv __pyx_t_3HSV_3hsv_HSV_;
/* "hsv.pxd":26
*
* ctypedef hsv HSV_
* ctypedef rgb RGB_ # <<<<<<<<<<<<<<
*
*
*/
typedef struct rgb __pyx_t_3HSV_3hsv_RGB_;
struct __pyx_opt_args_3HSV_7testing_rgb_to_hsv_testing;
/* "testing.pyx":32
*
*
* cpdef void rgb_to_hsv_testing(bint wall_ = False, double tolerance_ = TOLERANCE)nogil: # <<<<<<<<<<<<<<
* """
* TEST RGB TO HSV AND HSV TO RGB
*/
struct __pyx_opt_args_3HSV_7testing_rgb_to_hsv_testing {
int __pyx_n;
int wall_;
double tolerance_;
};
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
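/* With CYTHON_FAST_THREAD_STATE, the error helpers above read and write the
   exception slots of a cached PyThreadState directly instead of calling
   PyErr_Fetch/PyErr_Restore, avoiding a thread-state lookup per operation. */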
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* WriteUnraisableException.proto */
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename,
int full_traceback, int nogil);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
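/* Dict-versioning cache: __PYX_PY_DICT_LOOKUP_IF_MODIFIED re-runs a dict lookup
   only when the dict's ma_version_tag has changed since the result was cached;
   otherwise it reuses the previously cached value. */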
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
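/* Code-object cache: tracebacks through C-compiled code need PyCodeObjects,
   which are synthesized on demand (see __Pyx_CreateCodeObjectForTraceback below)
   and cached here, keyed by line number, so they are not rebuilt on every error. */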
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'libc.math' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'HSV.hsv' */
/* Module declarations from 'HSV.testing' */
static double __pyx_v_3HSV_7testing_ONE_255;
static CYTHON_INLINE void __pyx_f_3HSV_7testing_show_error(unsigned int, unsigned int, unsigned int, __pyx_t_3HSV_3hsv_RGB_); /*proto*/
static void __pyx_f_3HSV_7testing_rgb_to_hsv_testing(int __pyx_skip_dispatch, struct __pyx_opt_args_3HSV_7testing_rgb_to_hsv_testing *__pyx_optional_args); /*proto*/
#define __Pyx_MODULE_NAME "HSV.testing"
extern int __pyx_module_is_main_HSV__testing;
int __pyx_module_is_main_HSV__testing = 0;
/* Implementation of 'HSV.testing' */
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ValueError;
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_name[] = "__name__";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_wall[] = "wall_";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_tolerance[] = "tolerance_";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_Mismatch_error[] = "\nMismatch error";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static PyObject *__pyx_kp_s_Mismatch_error;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_n_s_tolerance;
static PyObject *__pyx_n_s_wall;
static PyObject *__pyx_pf_3HSV_7testing_rgb_to_hsv_testing(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_wall_, double __pyx_v_tolerance_); /* proto */
static PyObject *__pyx_tuple_;
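/* The string constants above ("__main__", "range", "ValueError", the
   "\nMismatch error" message, ...) are interned once at import time by
   __Pyx_InitStrings; __pyx_tuple_ caches the argument tuple for the ValueError
   raised on a tolerance mismatch. */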
/* Late includes */
/* "testing.pyx":17
*
*
* cdef inline void show_error(unsigned int i, unsigned int j, unsigned int k, RGB_ rgb_): # <<<<<<<<<<<<<<
* """
*
*/
static CYTHON_INLINE void __pyx_f_3HSV_7testing_show_error(unsigned int __pyx_v_i, unsigned int __pyx_v_j, unsigned int __pyx_v_k, __pyx_t_3HSV_3hsv_RGB_ __pyx_v_rgb_) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("show_error", 0);
/* "testing.pyx":26
* :return: void
* """
* printf("\nOriginal RGB R:%d G:%d B:%d :", i, j, k) # <<<<<<<<<<<<<<
* printf("\nRetrieve RGB R:%f G:%f B:%f :", rgb_.r * 255.0, rgb_.g * 255.0, rgb_.b * 255.0)
* printf("\ndiff RGB dR:%g dG:%g dB:%g :\n",
*/
(void)(printf(((char const *)"\nOriginal RGB R:%d G:%d B:%d :"), __pyx_v_i, __pyx_v_j, __pyx_v_k));
/* "testing.pyx":27
* """
* printf("\nOriginal RGB R:%d G:%d B:%d :", i, j, k)
* printf("\nRetrieve RGB R:%f G:%f B:%f :", rgb_.r * 255.0, rgb_.g * 255.0, rgb_.b * 255.0) # <<<<<<<<<<<<<<
* printf("\ndiff RGB dR:%g dG:%g dB:%g :\n",
 * rgb_.r * 255.0 - <double>i, rgb_.g * 255.0 - <double>j, rgb_.b * 255.0 - <double>k)
*/
(void)(printf(((char const *)"\nRetrieve RGB R:%f G:%f B:%f :"), (__pyx_v_rgb_.r * 255.0), (__pyx_v_rgb_.g * 255.0), (__pyx_v_rgb_.b * 255.0)));
/* "testing.pyx":28
* printf("\nOriginal RGB R:%d G:%d B:%d :", i, j, k)
* printf("\nRetrieve RGB R:%f G:%f B:%f :", rgb_.r * 255.0, rgb_.g * 255.0, rgb_.b * 255.0)
* printf("\ndiff RGB dR:%g dG:%g dB:%g :\n", # <<<<<<<<<<<<<<
 * rgb_.r * 255.0 - <double>i, rgb_.g * 255.0 - <double>j, rgb_.b * 255.0 - <double>k)
*
*/
(void)(printf(((char const *)"\ndiff RGB dR:%g dG:%g dB:%g :\n"), (__pyx_v_rgb_.r * (-((double)__pyx_v_i))), ((__pyx_v_rgb_.g * 255.0) - ((double)__pyx_v_j)), ((__pyx_v_rgb_.b * 255.0) - ((double)__pyx_v_k))));
/* "testing.pyx":17
*
*
* cdef inline void show_error(unsigned int i, unsigned int j, unsigned int k, RGB_ rgb_): # <<<<<<<<<<<<<<
* """
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
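/* Generated body of the cpdef nogil function rgb_to_hsv_testing(): the prange()
   loop below is lowered to an OpenMP parallel-for over the red channel, while the
   inner j/k loops run as ordinary serial loops inside each thread. */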
/* "testing.pyx":32
*
*
* cpdef void rgb_to_hsv_testing(bint wall_ = False, double tolerance_ = TOLERANCE)nogil: # <<<<<<<<<<<<<<
* """
* TEST RGB TO HSV AND HSV TO RGB
*/
static PyObject *__pyx_pw_3HSV_7testing_1rgb_to_hsv_testing(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static void __pyx_f_3HSV_7testing_rgb_to_hsv_testing(CYTHON_UNUSED int __pyx_skip_dispatch, struct __pyx_opt_args_3HSV_7testing_rgb_to_hsv_testing *__pyx_optional_args) {
int __pyx_v_wall_ = ((int)0);
double __pyx_v_tolerance_ = ((double)1e-07);
int __pyx_v_i;
int __pyx_v_j;
int __pyx_v_k;
__pyx_t_3HSV_3hsv_HSV_ __pyx_v_hsv_;
__pyx_t_3HSV_3hsv_RGB_ __pyx_v_rgb_;
__Pyx_RefNannyDeclarations
long __pyx_t_1;
long __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save;
#endif
__Pyx_RefNannySetupContext("rgb_to_hsv_testing", 1);
if (__pyx_optional_args) {
if (__pyx_optional_args->__pyx_n > 0) {
__pyx_v_wall_ = __pyx_optional_args->wall_;
if (__pyx_optional_args->__pyx_n > 1) {
__pyx_v_tolerance_ = __pyx_optional_args->tolerance_;
}
}
}
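  /* Optional-argument convention: the caller passes a struct whose __pyx_n field
     records how many of the trailing default arguments were explicitly supplied;
     only those fields override the defaults set above (wall_ = 0, tolerance_ = 1e-7). */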
/* "testing.pyx":33
*
* cpdef void rgb_to_hsv_testing(bint wall_ = False, double tolerance_ = TOLERANCE)nogil:
* """ # <<<<<<<<<<<<<<
* TEST RGB TO HSV AND HSV TO RGB
*
*/
/*try:*/ {
/* "testing.pyx":54
*
 * # Loop over every RGB value possible
* for i in prange(256): # <<<<<<<<<<<<<<
* for j in range(256):
* for k in range(256):
*/
if ((1 == 0)) abort();
{
__pyx_t_3HSV_3hsv_HSV_ __pyx_parallel_temp0;
int __pyx_parallel_temp1 = ((int)0xbad0bad0);
int __pyx_parallel_temp2 = ((int)0xbad0bad0);
int __pyx_parallel_temp3 = ((int)0xbad0bad0);
__pyx_t_3HSV_3hsv_RGB_ __pyx_parallel_temp4;
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
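      /* likely()/unlikely() are temporarily reduced to pass-throughs inside the
         parallel region on old Apple GCC builds (and restored after the region
         ends), presumably to work around __builtin_expect issues in OpenMP blocks.
         The `if ((1 == 0)) abort();` guard above is prange's zero-step check with
         the constant step folded in, and the trip count below is Cython's usual
         (stop - start + step - step/|step|) / step formula, here simply 256. */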
__pyx_t_2 = (0x100 - 0 + 1 - 1/abs(1)) / 1;
if (__pyx_t_2 > 0)
{
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_3, __pyx_t_4, __pyx_t_5) firstprivate(__pyx_t_6) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for lastprivate(__pyx_v_hsv_) firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) lastprivate(__pyx_v_j) lastprivate(__pyx_v_k) lastprivate(__pyx_v_rgb_)
#endif /* _OPENMP */
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_2; __pyx_t_1++){
if (__pyx_parallel_why < 2)
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_1);
/* Initialize private variables to invalid values */
__pyx_v_j = ((int)0xbad0bad0);
__pyx_v_k = ((int)0xbad0bad0);
/* "testing.pyx":55
 * # Loop over every RGB value possible
* for i in prange(256):
* for j in range(256): # <<<<<<<<<<<<<<
* for k in range(256):
*
*/
for (__pyx_t_3 = 0; __pyx_t_3 < 0x100; __pyx_t_3+=1) {
__pyx_v_j = __pyx_t_3;
/* "testing.pyx":56
* for i in prange(256):
* for j in range(256):
* for k in range(256): # <<<<<<<<<<<<<<
*
* hsv_ = struct_rgb_to_hsv(i * ONE_255, j * ONE_255, k * ONE_255)
*/
for (__pyx_t_4 = 0; __pyx_t_4 < 0x100; __pyx_t_4+=1) {
__pyx_v_k = __pyx_t_4;
/* "testing.pyx":58
* for k in range(256):
*
* hsv_ = struct_rgb_to_hsv(i * ONE_255, j * ONE_255, k * ONE_255) # <<<<<<<<<<<<<<
* rgb_ = struct_hsv_to_rgb(hsv_.h, hsv_.s, hsv_.v)
*
*/
__pyx_v_hsv_ = struct_rgb_to_hsv((__pyx_v_i * __pyx_v_3HSV_7testing_ONE_255), (__pyx_v_j * __pyx_v_3HSV_7testing_ONE_255), (__pyx_v_k * __pyx_v_3HSV_7testing_ONE_255));
/* "testing.pyx":59
*
* hsv_ = struct_rgb_to_hsv(i * ONE_255, j * ONE_255, k * ONE_255)
* rgb_ = struct_hsv_to_rgb(hsv_.h, hsv_.s, hsv_.v) # <<<<<<<<<<<<<<
*
* if rgb_.r * 255.0 - <double>i > tolerance_:
*/
__pyx_v_rgb_ = struct_hsv_to_rgb(__pyx_v_hsv_.h, __pyx_v_hsv_.s, __pyx_v_hsv_.v);
/* "testing.pyx":61
* rgb_ = struct_hsv_to_rgb(hsv_.h, hsv_.s, hsv_.v)
*
* if rgb_.r * 255.0 - <double>i > tolerance_: # <<<<<<<<<<<<<<
* with gil:
* show_error(i, j, k, rgb_)
*/
__pyx_t_5 = ((((__pyx_v_rgb_.r * 255.0) - ((double)__pyx_v_i)) > __pyx_v_tolerance_) != 0);
if (__pyx_t_5) {
/* "testing.pyx":62
*
* if rgb_.r * 255.0 - <double>i > tolerance_:
* with gil: # <<<<<<<<<<<<<<
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error")
*/
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
/*try:*/ {
/* "testing.pyx":63
* if rgb_.r * 255.0 - <double>i > tolerance_:
* with gil:
* show_error(i, j, k, rgb_) # <<<<<<<<<<<<<<
* if wall_: raise ValueError("\nMismatch error")
*
*/
__pyx_f_3HSV_7testing_show_error(__pyx_v_i, __pyx_v_j, __pyx_v_k, __pyx_v_rgb_);
/* "testing.pyx":64
* with gil:
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error") # <<<<<<<<<<<<<<
*
* if rgb_.g * 255.0 - <double>j > tolerance_:
*/
__pyx_t_5 = (__pyx_v_wall_ != 0);
if (unlikely(__pyx_t_5)) {
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 64, __pyx_L18_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 64, __pyx_L18_error)
}
}
/* "testing.pyx":62
*
* if rgb_.r * 255.0 - <double>i > tolerance_:
* with gil: # <<<<<<<<<<<<<<
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error")
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L19;
}
__pyx_L18_error: {
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L8_error;
}
__pyx_L19:;
}
}
/* "testing.pyx":61
* rgb_ = struct_hsv_to_rgb(hsv_.h, hsv_.s, hsv_.v)
*
* if rgb_.r * 255.0 - <double>i > tolerance_: # <<<<<<<<<<<<<<
* with gil:
* show_error(i, j, k, rgb_)
*/
}
/* "testing.pyx":66
* if wall_: raise ValueError("\nMismatch error")
*
* if rgb_.g * 255.0 - <double>j > tolerance_: # <<<<<<<<<<<<<<
* with gil:
* show_error(i, j, k, rgb_)
*/
__pyx_t_5 = ((((__pyx_v_rgb_.g * 255.0) - ((double)__pyx_v_j)) > __pyx_v_tolerance_) != 0);
if (__pyx_t_5) {
/* "testing.pyx":67
*
* if rgb_.g * 255.0 - <double>j > tolerance_:
* with gil: # <<<<<<<<<<<<<<
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error")
*/
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
/*try:*/ {
/* "testing.pyx":68
* if rgb_.g * 255.0 - <double>j > tolerance_:
* with gil:
* show_error(i, j, k, rgb_) # <<<<<<<<<<<<<<
* if wall_: raise ValueError("\nMismatch error")
*
*/
__pyx_f_3HSV_7testing_show_error(__pyx_v_i, __pyx_v_j, __pyx_v_k, __pyx_v_rgb_);
/* "testing.pyx":69
* with gil:
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error") # <<<<<<<<<<<<<<
*
* if rgb_.b * 255.0 - <double>k > tolerance_:
*/
__pyx_t_5 = (__pyx_v_wall_ != 0);
if (unlikely(__pyx_t_5)) {
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 69, __pyx_L25_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 69, __pyx_L25_error)
}
}
/* "testing.pyx":67
*
* if rgb_.g * 255.0 - <double>j > tolerance_:
* with gil: # <<<<<<<<<<<<<<
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error")
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L26;
}
__pyx_L25_error: {
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L8_error;
}
__pyx_L26:;
}
}
/* "testing.pyx":66
* if wall_: raise ValueError("\nMismatch error")
*
* if rgb_.g * 255.0 - <double>j > tolerance_: # <<<<<<<<<<<<<<
* with gil:
* show_error(i, j, k, rgb_)
*/
}
/* "testing.pyx":71
* if wall_: raise ValueError("\nMismatch error")
*
* if rgb_.b * 255.0 - <double>k > tolerance_: # <<<<<<<<<<<<<<
* with gil:
* show_error(i, j, k, rgb_)
*/
__pyx_t_5 = ((((__pyx_v_rgb_.b * 255.0) - ((double)__pyx_v_k)) > __pyx_v_tolerance_) != 0);
if (__pyx_t_5) {
/* "testing.pyx":72
*
* if rgb_.b * 255.0 - <double>k > tolerance_:
* with gil: # <<<<<<<<<<<<<<
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error")
*/
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
/*try:*/ {
/* "testing.pyx":73
* if rgb_.b * 255.0 - <double>k > tolerance_:
* with gil:
* show_error(i, j, k, rgb_) # <<<<<<<<<<<<<<
* if wall_: raise ValueError("\nMismatch error")
*
*/
__pyx_f_3HSV_7testing_show_error(__pyx_v_i, __pyx_v_j, __pyx_v_k, __pyx_v_rgb_);
/* "testing.pyx":74
* with gil:
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error") # <<<<<<<<<<<<<<
*
*/
__pyx_t_5 = (__pyx_v_wall_ != 0);
if (unlikely(__pyx_t_5)) {
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 74, __pyx_L32_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(0, 74, __pyx_L32_error)
}
}
/* "testing.pyx":72
*
* if rgb_.b * 255.0 - <double>k > tolerance_:
* with gil: # <<<<<<<<<<<<<<
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error")
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L33;
}
__pyx_L32_error: {
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
goto __pyx_L8_error;
}
__pyx_L33:;
}
}
/* "testing.pyx":71
* if wall_: raise ValueError("\nMismatch error")
*
* if rgb_.b * 255.0 - <double>k > tolerance_: # <<<<<<<<<<<<<<
* with gil:
* show_error(i, j, k, rgb_)
*/
}
}
}
goto __pyx_L36;
__pyx_L8_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L35;
__pyx_L35:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates0)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_hsv_;
__pyx_parallel_temp1 = __pyx_v_i;
__pyx_parallel_temp2 = __pyx_v_j;
__pyx_parallel_temp3 = __pyx_v_k;
__pyx_parallel_temp4 = __pyx_v_rgb_;
}
__pyx_L36:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
__Pyx_XDECREF(__pyx_t_6);
__pyx_t_6 = NULL;
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
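      /* Parallel error propagation: a thread that hits an error jumps to
         __pyx_L8_error, captures its exception into the shared __pyx_parallel_exc_*
         slots under the GIL, and sets __pyx_parallel_why = 4 so the remaining
         threads skip their iterations. Below, the lastprivate temporaries are
         copied back and the captured exception is restored; since the function
         returns void, it is ultimately reported through __Pyx_WriteUnraisable. */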
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_hsv_ = __pyx_parallel_temp0;
__pyx_v_i = __pyx_parallel_temp1;
__pyx_v_j = __pyx_parallel_temp2;
__pyx_v_k = __pyx_parallel_temp3;
__pyx_v_rgb_ = __pyx_parallel_temp4;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L4_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "testing.pyx":33
*
* cpdef void rgb_to_hsv_testing(bint wall_ = False, double tolerance_ = TOLERANCE)nogil:
* """ # <<<<<<<<<<<<<<
* TEST RGB TO HSV AND HSV TO RGB
*
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
goto __pyx_L5;
}
__pyx_L4_error: {
#ifdef WITH_THREAD
__pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
goto __pyx_L1_error;
}
__pyx_L5:;
}
/* "testing.pyx":32
*
*
* cpdef void rgb_to_hsv_testing(bint wall_ = False, double tolerance_ = TOLERANCE)nogil: # <<<<<<<<<<<<<<
* """
* TEST RGB TO HSV AND HSV TO RGB
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_WriteUnraisable("HSV.testing.rgb_to_hsv_testing", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 1);
__pyx_L0:;
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
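/* Python-callable wrapper: parses the positional/keyword arguments (wall_,
   tolerance_), converts them to C types, and delegates to the C implementation
   above through __pyx_pf_3HSV_7testing_rgb_to_hsv_testing. */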
/* Python wrapper */
static PyObject *__pyx_pw_3HSV_7testing_1rgb_to_hsv_testing(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static char __pyx_doc_3HSV_7testing_rgb_to_hsv_testing[] = "\n    TEST RGB TO HSV AND HSV TO RGB \n \n    Loop over every RGB value from 0 .. 255 and determine the HSV values corresponding to the \n    RGB value. Convert the HSV values back to RGB (monitoring the maximum deviation between the real \n    and calculated values) and raise an error if the deviation exceeds the tolerance 1e-7.\n \n    :param wall_ : boolean; default False; stop the test at the first tolerance violation when True, otherwise continue \n    :param tolerance_: float; maximum tolerance, default 1e-7; the deviation (original value - calculated value) \n    should not exceed this tolerance\n    :return: void\n    ";
static PyObject *__pyx_pw_3HSV_7testing_1rgb_to_hsv_testing(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
int __pyx_v_wall_;
double __pyx_v_tolerance_;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("rgb_to_hsv_testing (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wall,&__pyx_n_s_tolerance,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wall);
if (value) { values[0] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 1:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_tolerance);
if (value) { values[1] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "rgb_to_hsv_testing") < 0)) __PYX_ERR(0, 32, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
}
if (values[0]) {
__pyx_v_wall_ = __Pyx_PyObject_IsTrue(values[0]); if (unlikely((__pyx_v_wall_ == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 32, __pyx_L3_error)
} else {
__pyx_v_wall_ = ((int)0);
}
if (values[1]) {
__pyx_v_tolerance_ = __pyx_PyFloat_AsDouble(values[1]); if (unlikely((__pyx_v_tolerance_ == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 32, __pyx_L3_error)
} else {
__pyx_v_tolerance_ = ((double)1e-07);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("rgb_to_hsv_testing", 0, 0, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 32, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("HSV.testing.rgb_to_hsv_testing", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_3HSV_7testing_rgb_to_hsv_testing(__pyx_self, __pyx_v_wall_, __pyx_v_tolerance_);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_3HSV_7testing_rgb_to_hsv_testing(CYTHON_UNUSED PyObject *__pyx_self, int __pyx_v_wall_, double __pyx_v_tolerance_) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
struct __pyx_opt_args_3HSV_7testing_rgb_to_hsv_testing __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("rgb_to_hsv_testing", 0);
__Pyx_XDECREF(__pyx_r);
__pyx_t_1.__pyx_n = 2;
__pyx_t_1.wall_ = __pyx_v_wall_;
__pyx_t_1.tolerance_ = __pyx_v_tolerance_;
__pyx_f_3HSV_7testing_rgb_to_hsv_testing(0, &__pyx_t_1);
__pyx_t_2 = __Pyx_void_to_None(NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 32, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("HSV.testing.rgb_to_hsv_testing", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyMethodDef __pyx_methods[] = {
{"rgb_to_hsv_testing", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_3HSV_7testing_1rgb_to_hsv_testing, METH_VARARGS|METH_KEYWORDS, __pyx_doc_3HSV_7testing_rgb_to_hsv_testing},
{0, 0, 0, 0}
};
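/* Module method table: rgb_to_hsv_testing is the only Python-visible function;
   show_error is a cdef inline helper with no Python wrapper. */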
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_testing(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_testing},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"testing",
0, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_kp_s_Mismatch_error, __pyx_k_Mismatch_error, sizeof(__pyx_k_Mismatch_error), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_n_s_tolerance, __pyx_k_tolerance, sizeof(__pyx_k_tolerance), 0, 0, 1, 1},
{&__pyx_n_s_wall, __pyx_k_wall, sizeof(__pyx_k_wall), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
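/* String table consumed by __Pyx_InitStrings() during module init: each entry
   binds a PyObject* slot to its C literal, with flags describing the encoding
   and whether the string is interned as an identifier. */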
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 55, __pyx_L1_error)
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 64, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "testing.pyx":64
* with gil:
* show_error(i, j, k, rgb_)
* if wall_: raise ValueError("\nMismatch error") # <<<<<<<<<<<<<<
*
* if rgb_.g * 255.0 - <double>j > tolerance_:
*/
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Mismatch_error); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 64, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC inittesting(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC inittesting(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_testing(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_testing(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
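/* PEP 489 multi-phase initialisation: PyInit_testing only returns the module
   definition; __pyx_pymod_create builds the module object from the import spec
   (after the single-interpreter check above), and __pyx_pymod_exec_testing
   below executes the actual module body. */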
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
static CYTHON_SMALL_CODE int __pyx_pymod_exec_testing(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'testing' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_testing(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("testing", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_HSV__testing) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "HSV.testing")) {
if (unlikely(PyDict_SetItemString(modules, "HSV.testing", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
(void)__Pyx_modinit_type_init_code();
(void)__Pyx_modinit_type_import_code();
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/* "testing.pyx":14
* DEF TOLERANCE = 1e-7
*
* cdef double ONE_255 = 1.0 / 255.0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_3HSV_7testing_ONE_255 = (1.0 / 255.0);
/* "testing.pyx":1
* # cython: binding=False, boundscheck=False, wraparound=False, nonecheck=False, cdivision=True, optimize.use_switch=True # <<<<<<<<<<<<<<
* # encoding: utf-8
*
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init HSV.testing", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init HSV.testing");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* WriteUnraisableException */
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
int full_traceback, CYTHON_UNUSED int nogil) {
PyObject *old_exc, *old_val, *old_tb;
PyObject *ctx;
__Pyx_PyThreadState_declare
#ifdef WITH_THREAD
PyGILState_STATE state;
if (nogil)
state = PyGILState_Ensure();
#ifdef _MSC_VER
else state = (PyGILState_STATE)-1;
#endif
#endif
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
if (full_traceback) {
Py_XINCREF(old_exc);
Py_XINCREF(old_val);
Py_XINCREF(old_tb);
__Pyx_ErrRestore(old_exc, old_val, old_tb);
PyErr_PrintEx(1);
}
#if PY_MAJOR_VERSION < 3
ctx = PyString_FromString(name);
#else
ctx = PyUnicode_FromString(name);
#endif
__Pyx_ErrRestore(old_exc, old_val, old_tb);
if (!ctx) {
PyErr_WriteUnraisable(Py_None);
} else {
PyErr_WriteUnraisable(ctx);
Py_DECREF(ctx);
}
#ifdef WITH_THREAD
if (nogil)
PyGILState_Release(state);
#endif
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
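/* Binary search over the sorted code-object cache: returns the index of an
   exact code_line match, or the position at which a new entry should be
   inserted. */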
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
/* CIntFromPyVerify */
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* CIntFromPy */
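/* Convert a Python int to a C int: small values are read straight from the
   PyLong digit array as a fast path, larger ones go through the libpython
   conversions and finally through _PyLong_AsByteArray. */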
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntToPy */
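/* Convert a C long to a Python int using the smallest suitable libpython
   constructor, with _PyLong_FromByteArray as the last resort. */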
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* FastTypeChecks */
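/* Exception matching that scans tp_mro (or walks tp_base) directly on
   CPython 3, avoiding the slower generic PyObject_IsSubclass machinery;
   the Python 2 path falls back to PyObject_IsSubclass. */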
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
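/* Decode or intern every entry of the module's static string table and
   pre-hash the resulting objects; returns -1 on failure. */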
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
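/* Convert an index-like object to Py_ssize_t, reading small PyLong values
   directly from the digit array and deferring to PyNumber_Index otherwise. */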
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
GB_binop__iseq_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__iseq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__iseq_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_uint32)
// A*D function (colscale): GB (_AxD__iseq_uint32)
// D*A function (rowscale): GB (_DxB__iseq_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_uint32)
// C=scalar+B GB (_bind1st__iseq_uint32)
// C=scalar+B' GB (_bind1st_tran__iseq_uint32)
// C=A+scalar GB (_bind2nd__iseq_uint32)
// C=A'+scalar GB (_bind2nd_tran__iseq_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_UINT32 || GxB_NO_ISEQ_UINT32)
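// (the GxB_NO_* flags come from GB_control.h; defining them omits this
// specialized kernel so the generic implementation is used instead)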
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__iseq_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__iseq_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__iseq_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__iseq_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__iseq_uint32)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__iseq_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__iseq_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__iseq_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__iseq_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__iseq_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__iseq_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__iseq_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB (_bind1st_tran__iseq_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB (_bind2nd_tran__iseq_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
nzz.c | // nzz: compute two-dimensional per-bin redshift distribution
// ---
// author: Nicolas Tessore <nicolas.tessore@manchester.ac.uk>
// date: 28 May 2019
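// usage: nzz [config] [output] [cat ...]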
#define _XOPEN_SOURCE 600
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <math.h>
#include <time.h>
#include <signal.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
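// each catalog row holds RW doubles: x, y, z, redshift, weight, grid cell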
static const int RW = 6;
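// sort catalog rows by grid cell, then by z, y, x within a cell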
int mapsort(const void* a, const void* b)
{
const double* x = a;
const double* y = b;
if(x[5] < y[5])
return -1;
if(x[5] > y[5])
return +1;
if(x[2] < y[2])
return -1;
if(x[2] > y[2])
return +1;
if(x[1] < y[1])
return -1;
if(x[1] > y[1])
return +1;
if(x[0] < y[0])
return -1;
if(x[0] > y[0])
return +1;
return 0;
}
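// linear index of the grid cell containing (x,y,z) for cell size s on a
// grid with w cells per row and h rows per z-slab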
static inline int index(double x, double y, double z, double s, int w, int h)
{
return (int)(z/s)*(w*h) + (int)(y/s)*w + (int)(x/s);
}
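// for grid cell q, collect the row ranges [v[2i], v[2i+1]) of all points
// within gr cells in each direction; contiguous ranges are merged and *c
// receives the number of ranges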
static inline int query(int q, const int ma[], int gx, int gy, int gz,
int gr, int* c, int v[])
{
int i, il, ih, j, jl, jh, k, kl, kh, l, m, n, p;
i = q/(gx*gy);
j = (q/gx)%gy;
k = q%gx;
il = i > gr ? i-gr : 0;
ih = i+gr < gz ? i+gr+1 : gz;
jl = j > gr ? j-gr : 0;
jh = j+gr < gy ? j+gr+1 : gy;
kl = k > gr ? k-gr : 0;
kh = k+gr < gx ? k+gr+1 : gx;
n = 0;
p = -1;
for(i = il; i < ih; ++i)
{
for(j = jl; j < jh; ++j)
{
k = (i*gy + j)*gx;
l = ma[k + kl];
m = ma[k + kh];
if(l == p)
p = (v[2*n-1] = m);
else
p = (v[2*n+0] = l, v[2*n+1] = m), ++n;
}
}
*c = n;
return q;
}
#include "io.c" // yes, really
static const char* ANIM[] = {
"\033[34m\xe2\xa0\xb7\033[0m", "\033[34m\xe2\xa0\xaf\033[0m",
"\033[34m\xe2\xa0\x9f\033[0m", "\033[34m\xe2\xa0\xbb\033[0m",
"\033[34m\xe2\xa0\xbd\033[0m", "\033[34m\xe2\xa0\xbe\033[0m"
};
static const int NANIM = sizeof(ANIM)/sizeof(*ANIM);
volatile sig_atomic_t AL;
volatile sig_atomic_t QQ;
void handler(int s)
{
AL = (s == SIGALRM);
QQ = (s == SIGQUIT);
signal(s, handler);
}
int main(int argc, char* argv[])
{
char* cfgfile;
struct config cfg;
bool ls, sc, tc;
int nc, nd, nr;
double dl, dh, d0, dm, Dl, Dh, rl, rh, rm;
double ui, uo;
int rc[2];
double* rv[2];
int cc, xc;
double* cv;
double* xv;
double gs;
int gr;
double xl, xh, yl, yh, zl, zh;
int gx, gy, gz, ng;
int* ma;
double* Z;
double* N;
double s;
time_t st;
int dt;
int i, j;
char* bf, *nf, *sv, *sx;
if(isatty(fileno(stdout)))
{
bf = "\033[1m";
nf = "\033[0m";
sv = "\033[32m\xe2\x9c\x94\033[0m";
sx = "\033[31m\xe2\x9c\x98\033[0m";
}
else
{
bf = nf = "";
sv = sx = ">";
}
cfgfile = NULL;
memset(&cfg, 0, sizeof(cfg));
if(argc > 5)
goto err_usage;
if(argc > 1 && strcmp(argv[1], "--") != 0)
cfgfile = strdup(argv[1]);
if(argc > 2 && strcmp(argv[2], "--") != 0)
cfg.output = strdup(argv[2]);
for(i = 3; i < argc; ++i)
if(strcmp(argv[i], "--") != 0)
cfg.catv[cfg.catc++] = strdup(argv[i]);
if(!cfgfile)
cfgfile = strdup("nzz.cfg");
readcfg(cfgfile, &cfg);
printf("\n");
printf("%sconfiguration file %s%s\n", bf, cfgfile, nf);
printf("\n");
printcfg(&cfg);
printf("\n");
sc = cfg.coords >= COORDS_LONLAT;
ls = cfg.spacing == SPACING_LOG;
ui = UCONV[cfg.units];
uo = UCONV[cfg.thunit];
nd = cfg.nth;
dl = cfg.thmin*uo;
dh = cfg.thmax*uo;
nr = cfg.nz;
rl = cfg.zmin;
rh = cfg.zmax;
#ifdef _OPENMP
if(cfg.num_threads)
omp_set_num_threads(cfg.num_threads);
tc = cfg.thread_data == TDATA_COPY;
#else
tc = false;
#endif
if(sc)
{
dl = 2*sin(0.5*dl);
dh = 2*sin(0.5*dh);
}
if(ls)
{
d0 = log(dl);
dm = nd/(log(dh) - d0);
}
else
{
d0 = dl;
dm = nd/(dh - d0);
}
Dl = dl*dl;
Dh = dh*dh;
rm = nr/(rh - rl);
for(nc = 0; nc < cfg.catc; ++nc)
{
printf("%sread catalog %d%s\n", bf, nc, nf);
fflush(stdout);
rc[nc] = 0;
rv[nc] = NULL;
readc(cfg.catv[nc], cfg.coords, ui, &rc[nc], &rv[nc]);
printf("%s done with %d points\n", sv, rc[nc]);
printf("\n");
}
printf("%sbuild index%s\n", bf, nf);
fflush(stdout);
gs = 0.25*dh;
gr = ceil(dh/gs);
xl = xh = rv[0][0];
yl = yh = rv[0][1];
zl = zh = rv[0][2];
for(j = 0; j < nc; ++j)
{
for(i = 0; i < rc[j]; ++i)
{
if(rv[j][i*RW+0] < xl) xl = rv[j][i*RW+0];
if(rv[j][i*RW+0] > xh) xh = rv[j][i*RW+0];
if(rv[j][i*RW+1] < yl) yl = rv[j][i*RW+1];
if(rv[j][i*RW+1] > yh) yh = rv[j][i*RW+1];
if(rv[j][i*RW+2] < zl) zl = rv[j][i*RW+2];
if(rv[j][i*RW+2] > zh) zh = rv[j][i*RW+2];
}
}
gx = floor((xh - xl)/gs) + 1;
gy = floor((yh - yl)/gs) + 1;
gz = floor((zh - zl)/gs) + 1;
ng = gx*gy*gz;
for(j = 0; j < nc; ++j)
{
for(i = 0; i < rc[j]; ++i)
rv[j][i*RW+5] = index(rv[j][i*RW+0]-xl, rv[j][i*RW+1]-yl,
rv[j][i*RW+2]-zl, gs, gx, gy);
qsort(rv[j], rc[j], RW*sizeof(double), mapsort);
}
ma = malloc((ng+1)*sizeof(int));
if(!ma)
goto err_alloc;
cc = rc[0];
cv = rv[0];
xc = rc[nc-1];
xv = rv[nc-1];
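// ma[i] = first row of xv whose cell is >= i, so cell q spans [ma[q], ma[q+1])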
for(i = 0, j = 0; i < ng; ++i)
{
while(j < xc && xv[j*RW+5] < i)
j += 1;
ma[i] = j;
}
ma[ng] = xc;
printf("%s done with %d x %d x %d grid cells\n", sv, gx, gy, gz);
printf("\n");
Z = calloc(nd, sizeof(double));
N = calloc(nd*nr*nr, sizeof(double));
if(!Z || !N)
goto err_alloc;
s = 0;
signal(SIGALRM, handler);
signal(SIGQUIT, handler);
AL = QQ = 0;
printf("%sworking%s\n", bf, nf);
fflush(stdout);
st = time(NULL);
dt = 0;
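// each thread accumulates into private histograms Z_, N_ (and optionally
// private copies of the data) and merges into the shared Z, N at the end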
#pragma omp parallel default(none) shared(Z, N, s, AL, QQ, st, dt) \
firstprivate(ls, sc, tc, nd, nc, d0, dm, Dl, Dh, nr, rl, rm, \
gr, gx, gy, gz, ng, cc, cv, xc, xv, ma, ANIM, NANIM, stdout)
{
int q, qc, nq;
int* qr;
double* cv_;
double* xv_;
int* ma_;
double* Z_;
double* N_;
double s_;
bool fb;
int i, j, jh;
nq = 0;
qr = malloc((2*gr+1)*(2*gr+1)*2*sizeof(int));
if(!qr)
perror(NULL), abort();
if(tc)
{
cv_ = malloc(cc*RW*sizeof(double));
if(cv != xv)
xv_ = malloc(xc*RW*sizeof(double));
else
xv_ = cv_;
ma_ = malloc((ng+1)*sizeof(int));
if(!cv_ || !xv_ || !ma_)
perror(NULL), abort();
memcpy(cv_, cv, cc*RW*sizeof(double));
if(cv != xv)
memcpy(xv_, xv, xc*RW*sizeof(double));
memcpy(ma_, ma, (ng+1)*sizeof(int));
}
else
{
cv_ = cv;
xv_ = xv;
ma_ = ma;
}
Z_ = calloc(nd, sizeof(double));
N_ = calloc(nd*nr*nr, sizeof(double));
if(!Z_ || !N_)
perror(NULL), abort();
s_ = 0;
fb = false;
#pragma omp master
if(isatty(fileno(stdout)))
{
fb = true;
AL = false;
alarm(1);
#ifdef _OPENMP
printf("\r%s %d thread(s) ", ANIM[0], omp_get_num_threads());
fflush(stdout);
#endif
}
qc = -1;
#pragma omp for schedule(dynamic, 1) nowait
for(i = 0; i < cc; ++i)
{
const double xi = cv_[i*RW+0];
const double yi = cv_[i*RW+1];
const double zi = cv_[i*RW+2];
const double ri = cv_[i*RW+3];
const double wi = cv_[i*RW+4];
const int qi = cv_[i*RW+5];
const int ni = rm*(ri - rl);
if(QQ)
continue;
if(AL && fb)
{
dt = difftime(time(NULL), st);
printf("\r%s %.2f%%", ANIM[dt%NANIM], 100.*i/cc);
printf(" in %02d:%02d:%02d ", dt/3600, (dt/60)%60, dt%60);
fflush(stdout);
AL = false;
alarm(1);
}
if(ni < 0 || ni >= nr)
continue;
if(qi != qc)
qc = query(qi, ma_, gx, gy, gz, gr, &nq, qr);
for(q = 0; q < nq; ++q)
{
for(j = qr[2*q+0], jh = qr[2*q+1]; j < jh; ++j)
{
const double xj = xv_[j*RW+0];
const double yj = xv_[j*RW+1];
const double zj = xv_[j*RW+2];
const double rj = xv_[j*RW+3];
const double wj = xv_[j*RW+4];
const int nj = rm*(rj - rl);
const double dx = xi - xj;
const double dy = yi - yj;
const double dz = zi - zj;
const double D = dx*dx + dy*dy + dz*dz;
if(nj >= 0 && nj < nr && D >= Dl && D < Dh)
{
const int k = dm*((ls ? 0.5*log(D) : sqrt(D)) - d0);
const int l = k*nr*nr + ni*nr + nj;
Z_[k] += wi*wj;
N_[l] += wi*wj;
s_ += 1;
}
}
}
}
#pragma omp critical
{
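// N is kept as the running weighted mean of the per-thread N_/Z_ with
// weights Z_, so it ends up as sum(N_)/sum(Z_) for each angular bin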
for(j = 0; j < nd; ++j)
{
const int k = j*nr*nr;
if(Z_[j] == 0)
    continue; // nothing to merge; also avoids 0/0 below
Z[j] += Z_[j];
for(i = 0; i < nr*nr; ++i)
    N[k+i] += (Z_[j]/Z[j])*(N_[k+i]/Z_[j] - N[k+i]);
}
s += s_;
}
free(qr);
free(Z_);
free(N_);
if(tc)
{
free(cv_);
if(cv_ != xv_)
free(xv_);
free(ma_);
}
}
dt = difftime(time(NULL), st);
if(isatty(fileno(stdout)))
printf("\r");
printf("%s done with %.0f pairs", sv, s);
printf(" in %02d:%02d:%02d \n", dt/3600, (dt/60)%60, dt%60);
printf("\n");
output(cfg.output, nd, nr, N);
free(Z);
free(N);
free(ma);
for(j = 0; j < nc; ++j)
free(rv[j]);
free(cfgfile);
freecfg(&cfg);
return EXIT_SUCCESS;
err_usage:
fprintf(stderr, "usage: nzz [config] [output] [cat ...]\n");
return EXIT_FAILURE;
err_alloc:
perror(NULL);
return EXIT_FAILURE;
}
|
GB_binop__lt_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__lt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__lt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__lt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__lt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__lt_fp32)
// A*D function (colscale): GB (_AxD__lt_fp32)
// D*A function (rowscale): GB (_DxB__lt_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__lt_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__lt_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__lt_fp32)
// C=scalar+B GB (_bind1st__lt_fp32)
// C=scalar+B' GB (_bind1st_tran__lt_fp32)
// C=A+scalar GB (_bind2nd__lt_fp32)
// C=A'+scalar GB (_bind2nd_tran__lt_fp32)
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LT || GxB_NO_FP32 || GxB_NO_LT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__lt_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__lt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__lt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__lt_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__lt_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__lt_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__lt_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__lt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pi_openmp.c | /*
Compute the value of pi
Input:
Number of steps to be performed for the computation
Output:
Value of pi
*/
#include <omp.h>
#include <stdio.h>
int main(int argc, char** argv) {
double pi, sum, step, x, aux;
int num_steps, i;
printf("Enter number of terms: ");
fflush(stdout);
    if (scanf("%d", &num_steps) != 1 || num_steps < 1) {
        fprintf(stderr, "Invalid number of terms\n");
        return 1;
    }
sum = 0.0;
step = 1.0/(double) num_steps;
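    /*
     * Midpoint rule for pi = integral from 0 to 1 of 4/(1+x^2) dx:
     * pi ~= step * sum_i 4/(1 + x_i^2), with x_i = (i + 0.5)*step.
     */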
#pragma omp parallel private(i,x,aux) shared(sum)
{
#pragma omp for schedule(static)
for(i=0; i < num_steps; i++) {
x = (i + 0.5) * step;
aux = 4.0 / (1.0 + x * x);
#pragma omp critical
sum = sum + aux;
}
}
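    /*
     * Sketch of the idiomatic alternative: an OpenMP reduction gives the
     * same result without serializing every iteration through the
     * critical section:
     *
     *   sum = 0.0;
     *   #pragma omp parallel for private(x) reduction(+:sum) schedule(static)
     *   for (i = 0; i < num_steps; i++) {
     *       x = (i + 0.5) * step;
     *       sum += 4.0 / (1.0 + x * x);
     *   }
     */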
pi = step * sum;
printf("Value of pi = %lf\n", pi);
    return 0;
} |
zpbtrf.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
/***************************************************************************//**
*
* @ingroup plasma_pbtrf
*
 *  Performs the Cholesky factorization of a Hermitian positive definite
 *  band matrix A,
 *
 *    \f[ A = U^H \times U \f] or \f[ A = L \times L^H \f]
 *
 *  if uplo = PlasmaUpper or PlasmaLower, respectively, where U is upper
 *  triangular, and L is lower triangular with positive diagonal elements.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
 * @param[in] n
 *          The order of the matrix A. n >= 0.
 *
 * @param[in] kd
 *          The number of superdiagonals within the band of A if uplo=upper,
 *          or the number of subdiagonals if uplo=lower. kd >= 0.
*
* @param[in,out] AB
* On entry, the upper or lower triangle of the Hermitian band
* matrix A, stored in the first KD+1 rows of the array. The
* j-th column of A is stored in the j-th column of the array AB
* as follows:
* if UPLO = 'U', AB(kd+1+i-j,j) = A(i,j) for max(1,j-kd) <= i <= j;
* if UPLO = 'L', AB(1+i-j,j) = A(i,j) for j <= i <= min(n,j+kd).
* \n
* On exit, if INFO = 0, the triangular factor U or L from the
* Cholesky factorization A = U^H*U or A = L*L^H of the band
* matrix A, in the same storage format as A.
*
* @param[in] ldab
 *          The leading dimension of the array AB. ldab >= kd+1.
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
* @retval > 0 if i, the leading minor of order i of A is not
* positive definite, so the factorization could not
* be completed, and the solution has not been computed.
*
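 * @par Example
 *      A minimal call sequence (a sketch, assuming plasma_init() has been
 *      called and AB holds the band matrix in the storage described above):
 * @code
 *      int info = plasma_zpbtrf(PlasmaLower, n, kd, AB, kd+1);
 *      if (info != PlasmaSuccess)
 *          printf("pbtrf failed: %d\n", info);
 * @endcode
 *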
*******************************************************************************
*
* @sa plasma_omp_zpbtrf
* @sa plasma_cpbtrf
* @sa plasma_dpbtrf
* @sa plasma_spbtrf
*
******************************************************************************/
int plasma_zpbtrf(plasma_enum_t uplo,
int n, int kd,
plasma_complex64_t *pAB, int ldab)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
return PlasmaErrorNotInitialized;
}
// Check input arguments.
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
return -1;
}
if (n < 0) {
plasma_error("illegal value of n");
return -2;
}
if (kd < 0) {
plasma_error("illegal value of kd");
return -3;
}
if (ldab < kd+1) {
plasma_error("illegal value of ldab");
return -5;
}
// quick return
if (imax(n, 0) == 0)
return PlasmaSuccess;
// Tune parameters.
if (plasma->tuning)
plasma_tune_pbtrf(plasma, PlasmaComplexDouble, n);
// Set tiling parameters.
int nb = plasma->nb;
// Initialize tile matrix descriptors.
int lm = nb*(1+(kd+nb-1)/nb);
plasma_desc_t AB;
int retval;
retval = plasma_desc_general_band_create(PlasmaComplexDouble, uplo, nb, nb,
lm, n, 0, 0, n, n, kd, kd, &AB);
if (retval != PlasmaSuccess) {
plasma_error("plasma_desc_general_band_create() failed");
return retval;
}
// Initialize sequence.
plasma_sequence_t sequence;
retval = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval = plasma_request_init(&request);
// asynchronous block
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_zpb2desc(pAB, ldab, AB, &sequence, &request);
// Call the tile async function.
plasma_omp_zpbtrf(uplo, AB, &sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_zdesc2pb(AB, pAB, ldab, &sequence, &request);
}
// implicit synchronization
// Free matrix A in tile layout.
plasma_desc_destroy(&AB);
// Return status.
int status = sequence.status;
return status;
}
/***************************************************************************//**
*
* @ingroup plasma_pbtrf
*
* Performs the Cholesky factorization of a Hermitian positive definite
* matrix.
* Non-blocking tile version of plasma_zpbtrf().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in,out] AB
 *          Descriptor of the band matrix AB; on exit it holds the factor.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zpbtrf
* @sa plasma_omp_zpbtrf
* @sa plasma_omp_cpbtrf
* @sa plasma_omp_dpbtrf
* @sa plasma_omp_spbtrf
*
******************************************************************************/
void plasma_omp_zpbtrf(plasma_enum_t uplo, plasma_desc_t AB,
plasma_sequence_t *sequence, plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if ((uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(AB) != PlasmaSuccess) {
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
plasma_error("invalid A");
return;
}
if (sequence == NULL) {
plasma_fatal_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_fatal_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (AB.m == 0)
return;
// Call the parallel function.
plasma_pzpbtrf(uplo, AB, sequence, request);
}
|
argon2_fmt_plug.c | /*
* This software is Copyright (c) 2016 Agnieszka Bielec <bielecagnieszka8 at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* merged argon2d and argon2i into a single format file. JimF.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_argon2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_argon2);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "argon2.h"
#include "argon2_core.h"
#include "argon2_encoding.h"
#include "memdbg.h"
#define FORMAT_LABEL "argon2"
#define FORMAT_NAME ""
#define FORMAT_TAG_d "$argon2d$"
#define FORMAT_TAG_i "$argon2i$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG_d)-1)
#if defined(__XOP__)
#define ALGORITHM_NAME "Blake2 XOP"
#elif defined(__AVX__)
#define ALGORITHM_NAME "Blake2 AVX"
#elif defined(__SSSE3__)
#define ALGORITHM_NAME "Blake2 SSSE3"
#elif defined(__SSE2__)
#define ALGORITHM_NAME "Blake2 SSE2"
#else
#define ALGORITHM_NAME "Blake2"
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 100 //only in john
#define BINARY_SIZE 256 //only in john
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_SIZE 64 //only in john
#define SALT_ALIGN sizeof(uint32_t)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define OMP_SCALE 16
#ifdef _OPENMP
#define THREAD_NUMBER omp_get_thread_num()
#else
#define THREAD_NUMBER 1
#endif
static struct fmt_tests tests[] = {
{"$argon2d$v=19$m=4096,t=3,p=1$ZGFtYWdlX2RvbmU$w9w3s5/zV8+PcAZlJhnTCOE+vBkZssmZf6jOq3dKv50","password"},
{"$argon2i$v=19$m=4096,t=3,p=1$ZGFtYWdlX2RvbmU$N59QwnpxDQZRj1/cO6bqm408dD6Z2Z9LKYpwFJSPVKA","password"},
{"$argon2d$v=19$m=4096,t=3,p=1$c2hvcnRfc2FsdA$zMrTcOAOUje6UqObRVh84Pe1K6gumcDqqGzRM0ILzYmj","sacrificed"},
{"$argon2i$v=19$m=4096,t=3,p=1$c2hvcnRfc2FsdA$1l4kAwUdAApoCbFH7ghBEf7bsdrOQzE4axIJ3PV0Ncrd","sacrificed"},
{"$argon2d$v=19$m=16384,t=3,p=1$c2hvcnRfc2FsdA$TLSTPihIo+5F67Y1vJdfWdB9","blessed_dead"},
{"$argon2i$v=19$m=16384,t=3,p=1$c2hvcnRfc2FsdA$vvjDVog22A5x9eljmB+2yC8y","blessed_dead"},
{"$argon2d$v=19$m=16384,t=4,p=3$YW5vdGhlcl9zYWx0$yw93eMxC8REPAwbQ0e/q43jR9+RI9HI/DHP75uzm7tQfjU734oaI3dzcMWjYjHzVQD+J4+MG+7oyD8dN/PtnmPCZs+UZ67E+rkXJ/wTvY4WgXgAdGtJRrAGxhy4rD7d5G+dCpqhrog","death_dying"},
{"$argon2i$v=19$m=16384,t=4,p=3$YW5vdGhlcl9zYWx0$K7unxwO5aeuZCpnIJ06FMCRKod3eRg8oIRzQrK3E6mGbyqlTvvl47jeDWq/5drF1COJkEF9Ty7FWXJZHa+vqlf2YZGp/4qSlAvKmdtJ/6JZU32iQItzMRwcfujHE+PBjbL5uz4966A","death_dying"},
{NULL}
};
struct argon2_salt {
uint32_t t_cost, m_cost, lanes;
uint32_t hash_size;
uint32_t salt_length;
char salt[SALT_SIZE];
argon2_type type;
};
static struct argon2_salt saved_salt;
static region_t * memory;
static void **pseudo_rands;
static char *saved_key;
static int threads;
static size_t saved_mem_size;
static uint32_t saved_segment_length;
static unsigned char *crypted;
static void *get_salt(char *ciphertext);
static void init(struct fmt_main *self)
{
int i;
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
threads=omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#else
threads=1;
#endif
saved_key =
malloc(self->params.max_keys_per_crypt * (PLAINTEXT_LENGTH + 1));
memset(saved_key, 0,
self->params.max_keys_per_crypt * (PLAINTEXT_LENGTH + 1));
crypted = malloc(self->params.max_keys_per_crypt * (BINARY_SIZE));
memset(crypted, 0, self->params.max_keys_per_crypt * (BINARY_SIZE));
memory=malloc(threads*sizeof(region_t));
pseudo_rands=malloc(threads*sizeof(void*));
for (i=0;i<threads;i++)
{
init_region_t(&memory[i]);
pseudo_rands[i]=NULL;
}
saved_mem_size=0;
saved_segment_length=0;
}
static void done(void)
{
int i;
free(saved_key);
free(crypted);
for (i=0;i<threads;i++)
{
free_region_t(&memory[i]);
free(pseudo_rands[i]);
}
free(memory);
free(pseudo_rands);
}
static void print_memory(double bytes)
{
char s[]="\0kMGT";
int i=0;
while(bytes>=1024)
{
bytes/=1024;
i++;
}
printf("memory per hash : %.2lf %cB\n",bytes,s[i]);
}
static void reset(struct db_main *db)
{
static int printed=0;
if (!printed && options.verbosity > VERB_LEGACY)
{
int i;
uint32_t m_cost, prev_m_cost;
m_cost=prev_m_cost=0;
if (!db) {
for (i = 0; tests[i].ciphertext; i++)
{
struct argon2_salt *salt;
salt=get_salt(tests[i].ciphertext);
m_cost = MAX(m_cost, salt->m_cost);
if (i==0)
{
printf("\n");
prev_m_cost=m_cost;
print_memory(sizeof(block)*m_cost);
}
}
if (prev_m_cost!=m_cost)
{
printf("max ");
print_memory(sizeof(block)*m_cost);
}
} else {
struct db_salt *salts = db->salts;
while (salts != NULL) {
struct argon2_salt * salt=salts->salt;
m_cost = MAX(m_cost, salt->m_cost);
salts = salts->next;
}
printf("\n");
print_memory(sizeof(block)*m_cost);
}
}
}
static void ctx_init(argon2_context *ctx)
{
//size_t maxadlen = ctx->adlen;
//size_t maxsaltlen = ctx->saltlen;
//size_t maxoutlen = ctx->outlen;
static uint8_t out[BINARY_SIZE];
static uint8_t salt[SALT_SIZE];
ctx->adlen=0;
ctx->saltlen=SALT_SIZE;
ctx->outlen=BINARY_SIZE;
ctx->out=out;
ctx->salt=salt;
}
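/*
 * Accept only hashes shaped like the tests[] entries above, e.g.
 *   $argon2d$v=19$m=4096,t=3,p=1$<base64 salt>$<base64 hash>
 * argon2_decode_string() performs the actual parsing and validation.
 */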
static int valid(char *ciphertext, struct fmt_main *self)
{
argon2_context ctx;
int res;
ctx_init(&ctx);
if (!strncmp(ciphertext, FORMAT_TAG_d, FORMAT_TAG_LEN))
res=argon2_decode_string(&ctx, ciphertext, Argon2_d);
else if (!strncmp(ciphertext, FORMAT_TAG_i, FORMAT_TAG_LEN))
res=argon2_decode_string(&ctx, ciphertext, Argon2_i);
else
return 0;
if (res!=ARGON2_OK || ctx.outlen < 8)
return 0;
return 1;
}
static void set_key(char *key, int index)
{
int len;
len = strlen(key);
if (len > PLAINTEXT_LENGTH)
len = PLAINTEXT_LENGTH;
memcpy(saved_key + index * (PLAINTEXT_LENGTH + 1), key, len);
saved_key[index * (PLAINTEXT_LENGTH + 1) + len] = 0;
}
static char *get_key(int index)
{
return saved_key + index * (PLAINTEXT_LENGTH + 1);
}
static void *get_binary(char *ciphertext)
{
static char out[BINARY_SIZE];
argon2_context ctx;
ctx_init(&ctx);
if (!strncmp(ciphertext, FORMAT_TAG_d, FORMAT_TAG_LEN))
argon2_decode_string(&ctx, ciphertext, Argon2_d);
else
argon2_decode_string(&ctx, ciphertext, Argon2_i);
memset(out, 0, BINARY_SIZE);
memcpy(out, ctx.out, ctx.outlen);
return out;
}
static void *get_salt(char *ciphertext)
{
static struct argon2_salt salt;
argon2_context ctx;
memset(&salt,0,sizeof(salt));
ctx_init(&ctx);
if (!strncmp(ciphertext, FORMAT_TAG_d, FORMAT_TAG_LEN)) {
argon2_decode_string(&ctx, ciphertext, Argon2_d);
salt.type = Argon2_d;
} else {
argon2_decode_string(&ctx, ciphertext, Argon2_i);
salt.type = Argon2_i;
}
salt.salt_length = ctx.saltlen;
salt.m_cost = ctx.m_cost;
salt.t_cost = ctx.t_cost;
salt.lanes = ctx.lanes;
salt.hash_size = ctx.outlen;
memcpy(salt.salt, ctx.salt, ctx.saltlen);
return (void *)&salt;
}
static void set_salt(void *salt)
{
uint32_t i;
size_t mem_size;
uint32_t segment_length, memory_blocks;
memcpy(&saved_salt,salt,sizeof(struct argon2_salt));
mem_size=sizeof(block)*saved_salt.m_cost;
memory_blocks = saved_salt.m_cost;
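/*
 * Mirror the reference Argon2 core: clamp the block count so that each of
 * the lanes*ARGON2_SYNC_POINTS segments spans at least two blocks, then
 * derive the segment length from the clamped total.
 */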
if (memory_blocks < 2 * ARGON2_SYNC_POINTS * saved_salt.lanes) {
memory_blocks = 2 * ARGON2_SYNC_POINTS * saved_salt.lanes;
}
segment_length = memory_blocks / (saved_salt.lanes * ARGON2_SYNC_POINTS);
if (mem_size>saved_mem_size)
{
if (saved_mem_size>0)
for (i=0;i<threads;i++)
free_region_t(&memory[i]);
for (i=0;i<threads;i++)
alloc_region_t(&memory[i],mem_size);
saved_mem_size=mem_size;
}
if (segment_length>saved_segment_length)
{
if (saved_segment_length>0)
for (i=0;i<threads;i++)
free(pseudo_rands[i]);
for (i=0;i<threads;i++)
pseudo_rands[i]=malloc(sizeof(uint64_t) * segment_length);
saved_segment_length=segment_length;
}
}
static int cmp_all(void *binary, int count)
{
int i;
for (i = 0; i < count; i++) {
if (!memcmp(binary, crypted + i * BINARY_SIZE, saved_salt.hash_size))
return 1;
}
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypted + index * BINARY_SIZE, saved_salt.hash_size);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
int i;
const int count = *pcount;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < count; i++) {
argon2_hash(saved_salt.t_cost, saved_salt.m_cost, saved_salt.lanes,
            saved_key + i * (PLAINTEXT_LENGTH + 1),
            strlen(saved_key + i * (PLAINTEXT_LENGTH + 1)),
            saved_salt.salt, saved_salt.salt_length,
            crypted + i * BINARY_SIZE, saved_salt.hash_size,
            0, 0, saved_salt.type, ARGON2_VERSION_NUMBER,
            memory[THREAD_NUMBER % threads].aligned,
            pseudo_rands[THREAD_NUMBER % threads]);
}
return count;
}
static int get_hash_0(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & PH_MASK_0;
}
static int get_hash_1(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & PH_MASK_1;
}
static int get_hash_2(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & PH_MASK_2;
}
static int get_hash_3(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & PH_MASK_3;
}
static int get_hash_4(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & PH_MASK_4;
}
static int get_hash_5(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & PH_MASK_5;
}
static int get_hash_6(int index)
{
uint32_t *crypt = (uint32_t *) (crypted + index * BINARY_SIZE);
return crypt[0] & PH_MASK_6;
}
static int salt_hash(void *_salt)
{
int i;
struct argon2_salt *salt = (struct argon2_salt*)_salt;
unsigned int hash = 0;
char *p = salt->salt;
for (i=0;i<salt->salt_length;i++) {
hash <<= 1;
hash += (unsigned char)*p++;
if (hash >> SALT_HASH_LOG) {
hash ^= hash >> SALT_HASH_LOG;
hash &= (SALT_HASH_SIZE - 1);
}
}
hash ^= hash >> SALT_HASH_LOG;
hash &= (SALT_HASH_SIZE - 1);
return hash;
}
#if FMT_MAIN_VERSION > 11
static unsigned int tunable_cost_t(void *_salt)
{
struct argon2_salt *salt=(struct argon2_salt *)_salt;
return salt->t_cost;
}
static unsigned int tunable_cost_m(void *_salt)
{
struct argon2_salt *salt=(struct argon2_salt *)_salt;
return salt->m_cost;
}
static unsigned int tunable_cost_p(void *_salt)
{
struct argon2_salt *salt=(struct argon2_salt *)_salt;
return salt->lanes;
}
static unsigned int tunable_cost_type(void *_salt)
{
struct argon2_salt *salt=(struct argon2_salt *)_salt;
return (int)salt->type;
}
#endif
struct fmt_main fmt_argon2 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
sizeof(struct argon2_salt),
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP |
#endif
FMT_CASE | FMT_8_BIT,
{
"t",
"m",
"p",
"type [0:Argon2d 1:Argon2i]"
},
{0},
tests
}, {
init,
done,
reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
tunable_cost_t,
tunable_cost_m,
tunable_cost_p,
tunable_cost_type,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
polarity.c | #include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <math.h>
#include <mpi.h>
#include "parmt_polarity.h"
#ifdef PARMT_USE_INTEL
#include <mkl_cblas.h>
#else
#include <cblas.h>
#endif
#include "ttimes.h"
#include "iscl/array/array.h"
#include "iscl/geodetic/geodetic.h"
#include "iscl/memory/memory.h"
#define LDG 8
#ifndef MAX
#define MAX(x,y) (((x) > (y)) ? (x) : (y))
#endif
#ifndef MIN
#define MIN(x,y) (((x) < (y)) ? (x) : (y))
#endif
static void fillBasis(const double i, const double phi,
double *__restrict__ gam,
double *__restrict__ phat,
double *__restrict__ phihat);
static double computeContraction3x3(const double *__restrict__ a,
const double *__restrict__ M,
const double *__restrict__ b);
static void setM3x3(const int k, double *__restrict__ M);
static int computePadding64f(const int n);
static int performPolaritySearch64f(const int nmt, const int ldm,
const int nobs,
const int blockSize, const int mblock,
const int Mrows, const int Kcols,
const double *__restrict__ Dmat,
const double *__restrict__ G,
const double *__restrict__ Sigma,
const double *__restrict__ mts,
double *__restrict__ phi);
/*!
* @brief Driver routine for computing the polarity Green's functions from the
* ttimes ak135 global travel time table.
*
* @param[in] globalComm Global MPI communicator.
* @param[in] parms Contains the polarity modeling parameters.
* @param[in] data The SAC data whose header information will define
* the polarity and channel information.
*
* @param[out] polarityData On exit contains the corresponding polarity Green's
* functions that can be used by the grid-search to
* estimate polarities from a given moment tensor.
*
* @result 0 indicates success.
*/
int parmt_polarity_computeTTimesGreens(
const MPI_Comm globalComm,
const struct parmtPolarityParms_struct parms,
const struct parmtData_struct data,
struct polarityData_struct *polarityData)
{
char kt0[8], kcmpnm[8], stat[8];
double G6[6], *cmpazs, *cmpincs, *deps, *evlas, *evlos,
*GxxBuf, *GyyBuf, *GzzBuf, *GxyBuf, *GxzBuf, *GyzBuf,
*stlas, *stlos, cmpinc, cmpincSAC, cmpaz, stla, stlo;
int *icomps, *observation, *polarity, *waveType, icomp, ierr, ierrAll,
iloc, iobs, ipol, it, iwav, jloc, k, kt, myid, nPolarity, nprocs;
size_t lenos;
const int master = 0;
const int nTimeVars = 11;
const enum sacHeader_enum timeVarNames[11]
= {SAC_CHAR_KA,
SAC_CHAR_KT0, SAC_CHAR_KT1, SAC_CHAR_KT2, SAC_CHAR_KT3,
SAC_CHAR_KT4, SAC_CHAR_KT5, SAC_CHAR_KT6, SAC_CHAR_KT7,
SAC_CHAR_KT8, SAC_CHAR_KT9};
//------------------------------------------------------------------------//
//
// Initialize
ierr = 0;
cmpazs = NULL;
cmpincs = NULL;
deps = NULL;
evlas = NULL;
evlos = NULL;
GxxBuf = NULL;
GyyBuf = NULL;
GzzBuf = NULL;
GxyBuf = NULL;
GxzBuf = NULL;
GyzBuf = NULL;
icomps = NULL;
observation = NULL;
polarity = NULL;
stlas = NULL;
stlos = NULL;
waveType = NULL;
memset(polarityData, 0, sizeof(struct polarityData_struct));
// ttimes uses read-only file-io and i don't want to fix it
MPI_Comm_rank(globalComm, &myid);
MPI_Comm_size(globalComm, &nprocs);
//if (myid != master){goto WAIT;}
// Verify there is a chance for something to do
if (data.nobs < 1 || data.nlocs < 1)
{
if (data.nobs < 1){fprintf(stderr, "%s: No observations\n", __func__);}
if (data.nlocs < 1){fprintf(stderr, "%s: No locations\n", __func__);}
return 0;
}
// Extract the depths in the grid-search from the first waveform
iobs = 0;
deps = memory_calloc64f(data.nlocs);
evlas = memory_calloc64f(data.nlocs);
evlos = memory_calloc64f(data.nlocs);
for (iloc=0; iloc<data.nlocs; iloc++)
{
k = iobs*data.nlocs + iloc;
ierr = sacio_getFloatHeader(SAC_FLOAT_EVDP, data.sacGxx[k].header,
&deps[iloc]);
ierr += sacio_getFloatHeader(SAC_FLOAT_EVLA, data.sacGxx[k].header,
&evlas[iloc]);
ierr += sacio_getFloatHeader(SAC_FLOAT_EVLO, data.sacGxx[k].header,
&evlos[iloc]);
if (ierr != 0)
{
fprintf(stderr, "%s: Unable to get event coordinates on %d\n",
__func__, myid);
goto ERROR;
}
}
// Compute the number of usable polarities (i.e. observations with P picks
// or S picks with first motions)
nPolarity = 0;
if (myid == master)
{
polarity = memory_calloc32i(data.nobs);
waveType = memory_calloc32i(data.nobs);
icomps = memory_calloc32i(data.nobs);
stlas = memory_calloc64f(data.nobs);
stlos = memory_calloc64f(data.nobs);
cmpazs = memory_calloc64f(data.nobs);
cmpincs = memory_calloc64f(data.nobs);
observation = array_set32i(data.nobs, -1, &ierr);
for (iobs=0; iobs<data.nobs; iobs++)
{
// Ensure the essential preliminary information is defined
ierr = sacio_getFloatHeader(SAC_FLOAT_CMPINC,
data.data[iobs].header, &cmpincSAC);
ierr += sacio_getFloatHeader(SAC_FLOAT_CMPAZ,
data.data[iobs].header, &cmpaz);
ierr += sacio_getFloatHeader(SAC_FLOAT_STLA,
data.data[iobs].header, &stla);
ierr += sacio_getFloatHeader(SAC_FLOAT_STLO,
data.data[iobs].header, &stlo);
ierr += sacio_getCharacterHeader(SAC_CHAR_KCMPNM,
data.data[iobs].header, kcmpnm);
if (ierr != 0)
{
fprintf(stderr, "%s: Failed to get header information\n",
__func__);
continue;
}
// Figure out the component
            ierr = parmt_utils_getComponent(kcmpnm, cmpincSAC, &icomp);
            if (ierr != 0)
            {
                fprintf(stderr, "%s: Failed to classify component\n", __func__);
                continue;
            }
// SAC to SEED convention
cmpinc = cmpincSAC - 90.0;
// Figure out the component
/*
lenos = MAX(1, strlen(kcmpnm));
icomp = 1;
if (kcmpnm[lenos-1] == 'Z' || kcmpnm[lenos-1] == '1')
{
icomp = 1;
}
else if (kcmpnm[lenos-1] == 'N' || kcmpnm[lenos-1] == '2')
{
icomp = 2;
}
else if (kcmpnm[lenos-1] == 'E' || kcmpnm[lenos-1] == '3')
{
icomp = 3;
}
else
{
fprintf(stderr, "%s: Cannot classify component %s\n", __func__, kcmpnm);
continue;
}
*/
// Get the primary pick
for (it=0; it<nTimeVars; it++)
{
ierr = sacio_getCharacterHeader(timeVarNames[it],
data.data[iobs].header,
kt0);
if (ierr == 0){break;}
}
            lenos = (ierr == 0) ? strlen(kt0) : 0;
            // Ensure the pick is defined (kt0 is untouched if no pick was read)
            if (ierr == 0 && lenos > 1)
{
iwav = 1;
if (kt0[0] == 'P' || kt0[0] == 'p')
{
iwav = 1;
}
else if (kt0[0] == 'S' || kt0[0] == 's')
{
iwav = 2;
}
else
{
// surface waves will commonly be processed and not
// have polarities
if (kt0[0] == 'R' || kt0[0] == 'r' ||
kt0[0] == 'L' || kt0[0] == 'l')
{
continue;
}
fprintf(stderr, "%s: t0 phase is not a P or S phase %s\n",
__func__, kt0);
continue;
}
if (kt0[lenos-1] == '+')
{
ipol = 1;
}
else if (kt0[lenos-1] == '-')
{
ipol =-1;
}
else
{
fprintf(stderr, "%s: could not classify polarity %s\n",
__func__, kt0);
continue;
}
// let user know something happened
sacio_getCharacterHeader(SAC_CHAR_KSTNM,
data.data[iobs].header, stat);
fprintf(stdout, "%s: Polarity for %s is %d\n",
__func__, stat, ipol);
//printf("%f %f %f %f %d %d %d %d\n", stla, stlo, cmpinc, cmpaz, icomp, iobs, iwav, ipol);
// save information for modeling
stlas[nPolarity] = stla;
stlos[nPolarity] = stlo;
cmpincs[nPolarity] = cmpinc;
cmpazs[nPolarity] = cmpaz;
icomps[nPolarity] = icomp;
observation[nPolarity] = iobs;
waveType[nPolarity] = iwav;
polarity[nPolarity] = ipol;
nPolarity = nPolarity + 1;
//printf("%d %d %f %f\n", iwav, ipol, cmpinc, cmpaz);
} // End basic header info check
} // Loop on observations
} // End check on myid == master
// Tell all other processes about the forward modeling information
MPI_Bcast(&nPolarity, 1, MPI_INT, master, globalComm);
if (nPolarity == 0)
{
if (myid == master)
{
fprintf(stdout, "%s: There are no polarities\n", __func__);
}
ierr = 0;
goto ERROR;
}
if (myid != master)
{
stlas = memory_calloc64f(nPolarity);
stlos = memory_calloc64f(nPolarity);
cmpincs = memory_calloc64f(nPolarity);
cmpazs = memory_calloc64f(nPolarity);
icomps = memory_calloc32i(nPolarity);
observation = memory_calloc32i(nPolarity);
waveType = memory_calloc32i(nPolarity);
polarity = memory_calloc32i(nPolarity);
}
else
{
fprintf(stdout, "%s: Warning - i'm setting wts to unity for now\n",
__func__);
}
MPI_Bcast(stlas, nPolarity, MPI_DOUBLE, master, globalComm);
MPI_Bcast(stlos, nPolarity, MPI_DOUBLE, master, globalComm);
MPI_Bcast(cmpincs, nPolarity, MPI_DOUBLE, master, globalComm);
MPI_Bcast(cmpazs, nPolarity, MPI_DOUBLE, master, globalComm);
MPI_Bcast(icomps, nPolarity, MPI_INT, master, globalComm);
MPI_Bcast(observation, nPolarity, MPI_INT, master, globalComm);
MPI_Bcast(waveType, nPolarity, MPI_INT, master, globalComm);
MPI_Bcast(polarity, nPolarity, MPI_INT, master, globalComm);
// Set space
ierrAll = 0;
GxxBuf = memory_calloc64f(data.nlocs*nPolarity);
GyyBuf = memory_calloc64f(data.nlocs*nPolarity);
GzzBuf = memory_calloc64f(data.nlocs*nPolarity);
GxyBuf = memory_calloc64f(data.nlocs*nPolarity);
GxzBuf = memory_calloc64f(data.nlocs*nPolarity);
GyzBuf = memory_calloc64f(data.nlocs*nPolarity);
polarityData->Gxx = memory_calloc64f(data.nlocs*nPolarity);
polarityData->Gyy = memory_calloc64f(data.nlocs*nPolarity);
polarityData->Gzz = memory_calloc64f(data.nlocs*nPolarity);
polarityData->Gxy = memory_calloc64f(data.nlocs*nPolarity);
polarityData->Gxz = memory_calloc64f(data.nlocs*nPolarity);
polarityData->Gyz = memory_calloc64f(data.nlocs*nPolarity);
// Set the data
polarityData->polarity = memory_calloc64f(nPolarity);
for (ipol=0; ipol<nPolarity; ipol++)
{
polarityData->polarity[ipol] = (double) polarity[ipol];
}
// Set the weights
// TODO fix me
polarityData->wts = array_set64f(nPolarity, 1.0, &ierr);
// Compute the forward modeling matrix columns
for (jloc=0; jloc<data.nlocs; jloc=jloc+nprocs)
{
iloc = jloc + myid;
if (iloc >= data.nlocs){continue;}
// Compute the forward modeling matrix for this observation group
for (ipol=0; ipol<nPolarity; ipol++)
{
kt = iloc*nPolarity + ipol;
ierr = parmt_polarity_computeGreensRowFromTtimes(
waveType[ipol], icomps[ipol],
evlas[iloc], evlos[iloc], deps[iloc],
stlas[ipol], stlos[ipol],
cmpincs[ipol], cmpazs[ipol],
parms.ttimesTablesDir,
parms.ttimesModel,
G6);
if (ierr != 0)
{
fprintf(stderr,
"%s: Error computing polarities %d %d on PID %d\n",
__func__, iloc, ipol, myid);
ierrAll = ierrAll + 1;
continue;
}
// Save it
GxxBuf[kt] = G6[0];
GyyBuf[kt] = G6[1];
GzzBuf[kt] = G6[2];
GxyBuf[kt] = G6[3];
GxzBuf[kt] = G6[4];
GyzBuf[kt] = G6[5];
//printf("%f %f %f %e %e %e %e %e %e\n", stlas[ipol], stlos[ipol], deps[iloc], G6[0], G6[1], G6[2], G6[3], G6[4], G6[5]);
} // Loop on the polarities
//printf("\n");
} // Loop on the locations
ierr = ierrAll;
if (ierr != 0)
{
fprintf(stderr, "%s: Error computing polarities from ttimes\n",
__func__);
ierr = 1;
goto ERROR;
}
polarityData->nPolarity = nPolarity;
polarityData->nlocs = data.nlocs;
polarityData->nobs = data.nobs;
polarityData->obsMap = array_copy32i(nPolarity, observation, &ierr);
MPI_Allreduce(GxxBuf, polarityData->Gxx, data.nlocs*nPolarity,
MPI_DOUBLE, MPI_SUM, globalComm);
MPI_Allreduce(GyyBuf, polarityData->Gyy, data.nlocs*nPolarity,
MPI_DOUBLE, MPI_SUM, globalComm);
MPI_Allreduce(GzzBuf, polarityData->Gzz, data.nlocs*nPolarity,
MPI_DOUBLE, MPI_SUM, globalComm);
MPI_Allreduce(GxyBuf, polarityData->Gxy, data.nlocs*nPolarity,
MPI_DOUBLE, MPI_SUM, globalComm);
MPI_Allreduce(GxzBuf, polarityData->Gxz, data.nlocs*nPolarity,
MPI_DOUBLE, MPI_SUM, globalComm);
MPI_Allreduce(GyzBuf, polarityData->Gyz, data.nlocs*nPolarity,
MPI_DOUBLE, MPI_SUM, globalComm);
// Finally assemble Green's functions into modeling matrix
//WAIT:;
ERROR:;
MPI_Barrier(globalComm);
// free memory
memory_free64f(&GxxBuf);
memory_free64f(&GyyBuf);
memory_free64f(&GzzBuf);
memory_free64f(&GxyBuf);
memory_free64f(&GxzBuf);
memory_free64f(&GyzBuf);
memory_free64f(&deps);
memory_free64f(&evlas);
memory_free64f(&evlos);
memory_free64f(&stlas);
memory_free64f(&stlos);
memory_free64f(&cmpincs);
memory_free64f(&cmpazs);
memory_free32i(&observation);
memory_free32i(&polarity);
memory_free32i(&icomps);
memory_free32i(&waveType);
return ierr;
}
//============================================================================//
/*!
* @brief Computes the row of the Green's functions matrix for modeling
* a polarity from the ttimes model.
*
* @param[in] data Contains the pick type and channel information.
* @param[in] wavetype P_WAVE (1) indicates a P-wave.
* @param[in] wavetype S_WAVE (2) indicates an S-wave.
* @param[in] evdp Depth of the event in kilometers.
* @param[in] dirnm Directory containing the iasp-tau binary files.
* @param[in] model Name of the model, e.g., ak135.
*
* @param[out] G On exit contains the Green's functions so that for
* the i'th row of G, \f$ G_i \cdot \textbf{m} \f$
* computes an estimate of polarity. This is packed
* in order
* \f$
* \{m_{xx}, m_{yy}, m_{zz}, m_{xy}, m_{xz}, m_{yz} \}
* \f$
* and must have a dimension of at least [6].
*
* @result 0 indicates success.
*
* @copyright ISTI distributed under the Apache 2 license.
*
*/
int parmt_polarity_computeGreensRowFromData(const struct sacData_struct data,
const int wavetype,
const double evdp,
const char *dirnm,
const char *model,
double *__restrict__ G)
{
char kcmpnm[16];
double cmpaz, cmpinc, cmpincSAC, evla, evlo, stla, stlo;
int icomp, ierr;
size_t lenos;
    memset(kcmpnm, 0, 16*sizeof(char));
ierr = sacio_getFloatHeader(SAC_FLOAT_CMPINC, data.header, &cmpincSAC);
ierr += sacio_getFloatHeader(SAC_FLOAT_CMPAZ, data.header, &cmpaz);
ierr += sacio_getFloatHeader(SAC_FLOAT_EVLA, data.header, &evla);
ierr += sacio_getFloatHeader(SAC_FLOAT_EVLO, data.header, &evlo);
ierr += sacio_getFloatHeader(SAC_FLOAT_STLA, data.header, &stla);
ierr += sacio_getFloatHeader(SAC_FLOAT_STLO, data.header, &stlo);
ierr += sacio_getCharacterHeader(SAC_CHAR_KCMPNM, data.header, kcmpnm);
if (ierr != 0)
{
fprintf(stderr, "%s: Failed to get header information\n", __func__);
return -1;
}
// Figure out the component
ierr = parmt_utils_getComponent(kcmpnm, cmpincSAC, &icomp);
if (ierr != 0)
{
fprintf(stderr, "%s: Failed to classify component\n", __func__);
return -1;
}
// SAC to SEED convention
cmpinc = cmpincSAC - 90.0;
// Figure out the component
lenos = MAX(1, strlen(kcmpnm));
icomp = 1;
if (kcmpnm[lenos-1] == 'Z' || kcmpnm[lenos-1] == '1')
{
icomp = 1;
}
else if (kcmpnm[lenos-1] == 'N' || kcmpnm[lenos-1] == '2')
{
icomp = 2;
}
else if (kcmpnm[lenos-1] == 'E' || kcmpnm[lenos-1] == '3')
{
icomp = 3;
}
else
{
fprintf(stderr, "%s: Can't classify component %s\n", __func__, kcmpnm);
return -1;
}
ierr = parmt_polarity_computeGreensRowFromTtimes(wavetype, icomp,
evla, evlo, evdp,
stla, stlo,
cmpinc, cmpaz,
dirnm, model,
G);
if (ierr != 0)
{
fprintf(stderr, "%s: Failed to compute G\n", __func__);
}
return ierr;
}
//============================================================================//
/*!
* @brief Computes a row of the Green's functions matrix from ttimes
*
* @param[in] wavetype If 1 then this is a P wave.
* @param[in] wavetype If 2 then this is an S wave.
* @param[in] icomp If 1 then this is the vertical channel.
* @param[in] icomp If 2 then this is the north (1 or 2) channel.
* @param[in] icomp If 3 then this is the east (2 or 3) channel.
* @param[in] evla Event latitude (degrees).
* @param[in] evlo Event longitude (degrees).
* @param[in] evdp Event depth (km).
* @param[in] stla Station latitude (degrees).
* @param[in] stlo Station longitude (degrees).
* @param[in] cmpaz Component azimuth (0 north, +90 east).
* @param[in] cmpinc Component inclination (-90 up, 0 east/north, +90 down).
* @param[in] dirnm Directory containing the ttimes precomputed binary
 *                     files.  If NULL then the default as dictated by the
* ttimes configuration will be used.
* @param[in] model Model name (e.g., ak135 or iasp91)
*
* @param[out] G Row of matrix s.t. G*m produces estimates the polarity
* at the station. Here m is packed
* \f$ \{m_{xx}, m_{yy}, m_{zz},
* m_{xy}, m_{xz}, m_{yz} \} \f$
* This must have dimension of at least 6.
*
* @author Ben Baker
*
* @copyright ISTI distributed under Apache 2
*
*/
int parmt_polarity_computeGreensRowFromTtimes(
const int wavetype, const int icomp,
const double evla, const double evlo, const double evdp,
const double stla, const double stlo,
const double cmpinc, const double cmpaz,
const char *dirnm, const char *model,
double *__restrict__ G)
{
double aoiRec, az, azSrc, baz, bazRec, dist, delta, toaSrc;
struct ttimesTravelTime_struct ttime;
int ierr;
ierr = 0;
if (G == NULL)
{
fprintf(stderr, "%s: Error G is NULL\n", __func__);
return -1;
}
memset(G, 0, 6*sizeof(double));
if (evdp < 0.0 || evdp > ttimes_getMaxDepth())
{
fprintf(stderr, "%s: Error depth must be between [0,%f]\n", __func__,
ttimes_getMaxDepth());
return -1;
}
memset(&ttime, 0, sizeof(struct ttimesTravelTime_struct));
geodetic_gps2distanceAzimuth(evla, evlo, stla, stlo,
&dist, &delta, &az, &baz);
if (wavetype == 1)
{
ierr = ttimes_getFirstPPhase(delta, evdp, dirnm, model, &ttime);
}
else if (wavetype == 2)
{
ierr = ttimes_getFirstSPhase(delta, evdp, dirnm, model, &ttime);
}
else
{
fprintf(stderr, "%s: Invalid phase type - must be 1 (P) or 2 (S)\n",
__func__);
return -1;
}
if (ierr != 0)
{
fprintf(stderr, "%s: Error computing theoretical traveltime info\n",
__func__);
return -1;
}
// Compute the column in the Green's function matrix
toaSrc = ttime.toang;
azSrc = az;
bazRec = baz;
aoiRec = ttime.aoi;
//printf("%f %f %f %f %f %f %f\n", delta, stla, stlo, toaSrc, azSrc, aoiRec, bazRec);
ierr = parmt_polarity_computeGreensMatrixRow(wavetype, icomp,
azSrc, toaSrc, bazRec, aoiRec,
cmpinc, cmpaz, G);
if (ierr != 0)
{
fprintf(stderr, "%s: Failed to compute polarity for row\n", __func__);
memset(G, 0, 6*sizeof(double));
return -1;
}
return 0;
}
//============================================================================//
/*!
* @brief Computes a column for the Green's function s.t. G*m produces an
* estimate of polarity on the icomp'th component measured in the
* far-field. The moment tensor which would be applied to this row
* is packed:
* \f$ \{m_{xx}, m_{yy}, m_{zz}, m_{xy}, m_{xz}, m_{yz} \} \f$
* with convention North, East, Down (e.g., Jost and Herrmann).
* For more see Quantitative Seismology - Aki and Richards 2002,
* Eqn 4.96 on pg 111 and Source Mechanisms of Earthquakes: Theory
 *        and Practice - Udias et al. pg 100.
*
* @param[in] wavetype =1 -> P wave
* =2 -> S wave
* @param[in] icomp receiver component of motion:
* =1 -> Vertical channel
* =2 -> 1 or North channel
* =3 -> 2 or East channel
* @param[in] azSrc source to receiver azimuth is measured positive from
* north (degrees)
* @param[in] toaSrc take-off angle (measured positive from x3 where x3
* points down) (degrees)
* @param[in] bazRec receiver to source back azimuth measured positive
* from north (degrees)
* @param[in] aoiRec angle of incidence at receiver
* @param[in] cmpaz component azimuth (0 north, +90 east)
 * @param[in] cmpinc component inclination (-90 up, 0 east/north, +90 down)
*
* @param[out] G row of matrix s.t. G*m produces estimates the polarity
* at the station. Here m is packed
* \f$ \{m_{xx}, m_{yy}, m_{zz},
* m_{xy}, m_{xz}, m_{yz} \} \f$
*
 * @bug I've really only looked at the vertical P-teleseismic case - no idea
 *      about other phases or wavetypes.
*
* @author Ben Baker
*
* @copyright ISTI distributed under Apache 2
*
*/
int parmt_polarity_computeGreensMatrixRow(const int wavetype,
const int icomp,
const double azSrc,
const double toaSrc,
const double bazRec,
const double aoiRec,
const double cmpinc,
const double cmpaz,
double *__restrict__ G)
{
double M[9], G63[18], up[6], ush[6], usv[6],
gam[3], lhat[3], phat[3], phihat[3],
cosba, cos_cmpaz, cost_rec,
r11, r12, r13, r21, r22, r23, r31, r32, r33,
sinba, sin_cmpaz,
sint_rec,
t1, t2, t3, theta, xsign, u1, u2, ue, un, uz;
int k;
const double pi180 = M_PI/180.0;
    const bool lrot = true; // rotate from LQT to ZNE at the receiver
const int P_WAVE = 1;
const int S_WAVE = 2;
//------------------------------------------------------------------------//
//
// Error check
if (icomp < 1 || icomp > 3)
{
fprintf(stderr, "%s: Invalid component\n", __func__);
return -1;
}
if (wavetype < P_WAVE || wavetype > S_WAVE)
{
fprintf(stderr, "%s: Invalid wavetype\n", __func__);
return -1;
}
// Fill the basis at the source (Aki and Richards Eqn 4.88)
fillBasis(toaSrc, azSrc, gam, phat, phihat);
// Compute the contractions for u_p, u_sv, and u_sh (A&R Eqn 4.96)
// for all 6 individual moment tensor terms
for (k=0; k<6; k++)
{
// Set the moment tensor with the k'th mt term 1 others zero
setM3x3(k, M);
// Compute contraction
up[k] = computeContraction3x3(gam, M, gam);
usv[k] = computeContraction3x3(phat, M, gam);
ush[k] = computeContraction3x3(phihat, M, gam);
}
// Fill the basis at the receiver - notice the basis uses the forward
// azimuth from the receiver to the source
fillBasis(aoiRec, (bazRec + 180.0), lhat, phat, phihat);
// Compute geometric factors at receiver
//printf("%f %f %f %f %f\n", toaSrc, azSrc, bazRec, aoiRec, cmpaz);
theta = aoiRec*pi180;
cosba = cos(bazRec*pi180);
cost_rec = cos(theta);
sinba = sin(bazRec*pi180);
sint_rec = sin(theta);
cos_cmpaz = cos(cmpaz*pi180);
sin_cmpaz = sin(cmpaz*pi180);
// Set 3D rotation matrix
r11 = cost_rec;
r21 = sint_rec;
r31 = 0.0;
r12 =-sint_rec*sinba;
r22 = cost_rec*sinba;
r32 = -cosba;
r13 =-sint_rec*cosba;
r23 = cost_rec*cosba;
r33 = sinba;
// Flip sign for receivers that acquire positive down
xsign = 1.0;
if (fabs(cmpinc - 90.0) < 1.e-4){xsign =-1.0;}
    // Compute the 6 x 3 sub forward-modeling matrix; row k holds the
    // (Z, 1, 2) response of the k'th moment tensor term
if (wavetype == P_WAVE)
{
// Loop on mts terms
for (k=0; k<6; k++)
{
// Extract the (north, east, down) component
t3 =-up[k]*lhat[0]; // z-down -> z-up
//t3 = up[k]*lhat[0]; // z-up
t2 = up[k]*lhat[1]; // east
t1 = up[k]*lhat[2]; // north
// Not sure if i have to rotate LQT -> ZNE
if (lrot)
{
uz = t1*r11 + t2*r21 + t3*r31; // Z
ue = t1*r12 + t2*r22 + t3*r32; // E
un = t1*r13 + t2*r23 + t3*r33; // N
}
else
{
uz = t3;
ue = t2;
un = t1;
}
// Rotate into (1, 2)
u1 = un*cos_cmpaz + ue*sin_cmpaz;
u2 =-un*sin_cmpaz + ue*cos_cmpaz;
// Finish
G63[k*3+0] = xsign*uz;
//G63[k*3+0] =-xsign*uz;
G63[k*3+1] = u1;
G63[k*3+2] = u2;
}
}
    // S wave (SV and SH contributions)
else
{
// Loop on mts terms
for (k=0; k<6; k++)
{
// Extract the (north, east, down) component
t3 =-usv[k]*phat[0] - ush[k]*phihat[0]; // z-down -> z-up
t2 = usv[k]*phat[1] + ush[k]*phihat[1]; // east
t1 = usv[k]*phat[2] + ush[k]*phihat[2]; // north
// Not sure if i have to rotate LQT -> ZNE
if (lrot)
{
uz = t1*r11 + t2*r21 + t3*r31; // Z
ue = t1*r12 + t2*r22 + t3*r32; // E
un = t1*r13 + t2*r23 + t3*r33; // N
}
else
{
uz = t3;
ue = t2;
un = t1;
}
// Rotate into (1, 2)
u1 = un*cos_cmpaz + ue*sin_cmpaz;
u2 =-un*sin_cmpaz + ue*cos_cmpaz;
// Finish
G63[k*3+0] = xsign*uz;
G63[k*3+1] = u1;
G63[k*3+2] = u2;
}
}
// Copy the result - vertical
if (icomp == 1)
{
for (k=0; k<6; k++)
{
G[k] = G63[k*3+0];
}
}
// 1 component
else if (icomp == 2)
{
for (k=0; k<6; k++)
{
G[k] = G63[k*3+1];
}
}
// 2 component
else if (icomp == 3)
{
for (k=0; k<6; k++)
{
G[k] = G63[k*3+2];
}
}
return 0;
}
//============================================================================//
/*!
* @brief Fills in the basis vectors - Aki and Richards pg 108 Eqn 4.88.
*
* @param[in] i Take-off angle (degrees).
* @param[in] phi Azimuth angle (degrees).
 * @param[out] gam    P-wave direction. This has dimension [3].
 * @param[out] phat   SV-wave direction. This has dimension [3].
 * @param[out] phihat SH-wave direction. This has dimension [3].
*
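 * @note As implemented below, in NED coordinates (x north, y east, z down):
 *       gam    = ( sin(i)cos(phi), sin(i)sin(phi),  cos(i) ),
 *       phat   = ( cos(i)cos(phi), cos(i)sin(phi), -sin(i) ),
 *       phihat = ( -sin(phi),      cos(phi),        0 ).
 *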
* @copyright ISTI distributed under the Apache 2 license.
*
*/
static void fillBasis(const double i, const double phi,
double *__restrict__ gam,
double *__restrict__ phat,
double *__restrict__ phihat)
{
double cosi, sini, cosp, sinp;
const double pi180 = M_PI/180.0;
cosi = cos(i*pi180);
sini = sin(i*pi180);
cosp = cos(phi*pi180);
sinp = sin(phi*pi180);
gam[0] = sini*cosp;
gam[1] = sini*sinp;
gam[2] = cosi;
phat[0] = cosi*cosp;
phat[1] = cosi*sinp;
phat[2] =-sini;
phihat[0] =-sinp;
phihat[1] = cosp;
phihat[2] = 0.0;
}
//============================================================================//
/*!
* @brief Sets the moment tensor matrix where the k'th moment tensor
* term is 1 and others are zero. Here moment tensor terms
* are counted {0,1,2,3,4,5} = {xx,yy,zz,xy,xz,yz}
*
* @param[in] k Moment tensor index. This is in the range [0,5] and follows
* the mapping {0,1,2,3,4,5} = {xx,yy,zz,xy,xz,yz}.
*
 * @param[out] M The 3x3 moment tensor corresponding to the k'th NED term,
 *        stored row major in (r,t,p) indexing. This is an array of
 *        dimension [9].
*
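 * @note The NED terms map onto the (r,t,p) storage (r up, t south, p east)
 *       as mrr=mzz, mtt=mxx, mpp=myy, mrt=mxz, mrp=-myz, and mtp=-mxy,
 *       which is why the mxy and myz entries below carry a -1.
 *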
*/
static void setM3x3(const int k, double *__restrict__ M)
{
memset(M, 0, 9*sizeof(double));
// mxx (fill mtt)
if (k == 0)
{
M[4] = 1.0; //M[0][0] = 1.0;
}
// myy (fill mpp)
else if (k == 1)
{
M[8] = 1.0; //M[1][1] = 1.0;
}
// mzz (fill mrr)
else if (k == 2)
{
M[0] = 1.0; //M[2][2] = 1.0;
}
    // mxy and myx (fill mtp)
else if (k == 3)
{
M[5] =-1.0; //M[0][1] = 1.0;
M[7] =-1.0; //M[1][0] = 1.0;
}
    // mxz and mzx (fill mrt)
else if (k == 4)
{
M[1] = 1.0; //M[0][2] = 1.0;
M[3] = 1.0; //M[2][0] = 1.0;
}
// myz and mzy (fill mrp)
else
{
M[2] =-1.0; //M[1][2] = 1.0;
M[6] =-1.0; //M[2][1] = 1.0;
}
/*
// mxx
if (k == 0)
{
M[0] = 1.0; //M[0][0] = 1.0;
}
// myy
else if (k == 1)
{
M[4] = 1.0; //M[1][1] = 1.0;
}
// mzz
else if (k == 2)
{
M[8] = 1.0; //M[2][2] = 1.0;
}
// mxy and myz
else if (k == 3)
{
M[1] = 1.0; //M[0][1] = 1.0;
M[3] = 1.0; //M[1][0] = 1.0;
}
// mxz and mzx
else if (k == 4)
{
M[2] = 1.0; //M[0][2] = 1.0;
M[6] = 1.0; //M[2][0] = 1.0;
}
// myz and mzy
else
{
M[5] = 1.0; //M[1][2] = 1.0;
M[7] = 1.0; //M[2][1] = 1.0;
}
*/
return;
}
//============================================================================//
/*!
* @brief Computes the contraction in Aki and Richards Eqn 4.96
* for the given basis vectors and moment tensor.
*/
static double computeContraction3x3(const double *__restrict__ a,
const double *__restrict__ M,
const double *__restrict__ b)
{
double res;
int i, ij, j;
res = 0.0;
for (i=0; i<3; i++)
{
for (j=0; j<3; j++)
{
ij = 3*i + j;
res = res + a[i]*M[ij]*b[j];
}
}
return res;
}
//============================================================================//
/*!
* @brief Tabulates the objective function over the earthquake locations.
*
* @param[in] locComm Location MPI communicator.
* @param[in] blockSize Block-size for performing matrix-matrix
* multiplications.
* @param[in] polarityData Contains the polarity data and the Green's functions.
* @param[in] mtloc Contains the local moment tensors in this rank's
* grid search.
*
* @param[out] phi The objective function tabulated for all locations
* and moment tensors. This is only accessed by
* master process in locComm; and for this case has
* dimension [nlocs x mtloc.nmtAll] with leading
* dimension mtloc.nmtAll. Otherwise, this can be NULL.
*
* @result 0 indicates success.
*
* @copyright ISTI distributed under the Apache 2 license.
*
*/
int polarity_performLocationSearch64f(const MPI_Comm locComm,
const int blockSize,
struct polarityData_struct polarityData,
struct localMT_struct mtloc,
double *__restrict__ phi)
{
double *Dmat, *G, *Sigma, *phiLoc, *phiWork;
int icol, ierr, ierrAll, iloc, ipol, jloc, jndx,
kt, mylocID, nlocProcs, mblock, pad;
const int nPolarity = polarityData.nPolarity;
const int Mrows = nPolarity;
const int Kcols = 6;
const int nlocs = polarityData.nlocs;
const int ldm = mtloc.ldm;
const int nmt = mtloc.nmt;
const int master = 0;
// Initialize
ierr = 0;
ierrAll = 0;
MPI_Comm_size(locComm, &nlocProcs);
MPI_Comm_rank(locComm, &mylocID);
// Compute padding for 64 bit data alignment in observations and mt blocks
pad = computePadding64f(blockSize);
mblock = blockSize + pad;
// Set space
phiLoc = memory_calloc64f(nmt);
G = memory_calloc64f(nPolarity*LDG);
Dmat = memory_calloc64f(nPolarity*mblock);
Sigma = array_set64f(nPolarity, 1.0, &ierr); // Default to identity
phiWork = memory_calloc64f(nlocs*mtloc.nmtAll);
// Set the row major data matrix where each row is an observation
for (ipol=0; ipol<nPolarity; ipol++)
{
for (icol=0; icol<mblock; icol++)
{
Dmat[ipol*mblock+icol] = polarityData.polarity[ipol];
}
}
// Input weights are given - copy them to diagonal weight matrix
if (polarityData.wts != NULL)
{
ierr = array_copy64f_work(nPolarity, polarityData.wts, Sigma);
}
// Loop on the source locations in the grid search
for (jloc=0; jloc<nlocs; jloc=jloc+nlocProcs)
{
iloc = jloc + mylocID;
kt = iloc*nPolarity;
// Assemble the row major Green's functions matrix
array_zeros64f_work(LDG*nPolarity, G);
#pragma omp simd
for (ipol=0; ipol<nPolarity; ipol++)
{
G[LDG*ipol+0] = polarityData.Gxx[kt+ipol];
G[LDG*ipol+1] = polarityData.Gyy[kt+ipol];
G[LDG*ipol+2] = polarityData.Gzz[kt+ipol];
G[LDG*ipol+3] = polarityData.Gxy[kt+ipol];
G[LDG*ipol+4] = polarityData.Gxz[kt+ipol];
G[LDG*ipol+5] = polarityData.Gyz[kt+ipol];
}
// Perform the polarity search for all mt's at this location
ierr = performPolaritySearch64f(nmt, ldm,
nPolarity,
blockSize, mblock,
Mrows, Kcols,
Dmat, G, Sigma, mtloc.mts,
phiLoc);
if (ierr != 0)
{
fprintf(stderr, "%s: Error in mt polarity search\n", __func__);
ierrAll = ierrAll + 1;
}
// Gather the moment tensor search results onto the master
jndx = 0;
if (mtloc.myid == master){jndx = iloc*mtloc.nmtAll;}
if (mtloc.commSize == 1)
{
array_copy64f_work(mtloc.nmt, phiLoc, &phiWork[jndx]);
}
else
{
MPI_Gatherv(phiLoc, mtloc.nmt, MPI_DOUBLE,
&phiWork[jndx], mtloc.nmtProc, mtloc.offset,
MPI_DOUBLE, master, mtloc.comm);
}
fprintf(stdout, "%s: mylocID: %d min: %f max: %f\n",
__func__, mylocID, array_min64f(nmt, phiLoc, &ierr),
array_max64f(nmt, phiLoc, &ierr));
}
// Reduce the search onto the master
//if (linLocComm && mylocID == master)
{
MPI_Reduce(phiWork, phi, nlocs*mtloc.nmtAll, MPI_DOUBLE,
MPI_SUM, master, locComm);
}
// Free memory
memory_free64f(&phiWork);
memory_free64f(&phiLoc);
memory_free64f(&G);
memory_free64f(&Dmat);
memory_free64f(&Sigma);
    if (ierrAll != 0){return -1;}
    return 0;
}
//============================================================================//
/*!
* @brief Performs the polarity search for a host of moment tensors.
*
* @param[in] nmt Number of moment tensors.
* @param[in] ldm Leading dimension of moment tensor matrix. This must
* be at least 6.
* @param[in] nPolarity The number of polarities (observations).
* @param[in] blockSize Controls the number of forward problems GM to compute
* simultaneously.
* @param[in] mblock The largest block size. This serves as a leading
* dimension.
* @param[in] Mrows Number of rows in Green's functions matrix.
* @param[in] Kcols Number of columns in Green's functions matrix. This
* should be 6.
* @param[in] Dmat The polarity data replicated to the max block size.
* This has dimension [nPolarity x mblock] with leading
* dimension mblock.
* @param[in] G Forward modeling matrix. This has dimension
* [nPolarity x LDG] with leading dimension LDG.
* @param[in] Sigma These are the data weights and has dimension
* [nPolarity].
* @param[in] mts Matrix of moment tensors and has dimension [nmt x ldm].
* with leading dimension ldm.
*
* @param[out] phi The objective function (variance reduction) tabulated
* for all the moment tensors and observations. This has
* dimension [nmt].
*
* @result 0 indicates success.
*
* @copyright ISTI distributed under the Apache 2 license.
*
*/
static int performPolaritySearch64f(const int nmt, const int ldm,
const int nPolarity,
const int blockSize, const int mblock,
const int Mrows, const int Kcols,
const double *__restrict__ Dmat,
const double *__restrict__ G,
const double *__restrict__ Sigma,
const double *__restrict__ mts,
double *__restrict__ phi)
{
double *U, *Usign, *res2, *sqrtSigmaWt, res, traceSigma;
int i, ic, ierr, ipol, jmt, kmt, nmtBlocks, Ncols;
const double one = 1.0;
const double zero = 0.0;
ierr = 0;
nmtBlocks = (int) ((double) (nmt)/(double) (blockSize) + 1.0);
if (nmtBlocks*blockSize < nmt)
{
fprintf(stderr, "%s: Internal error - all mts wont be visited\n",
__func__);
return -1;
}
#ifdef __INTEL_COMPILER
__assume_aligned(G, 64);
__assume_aligned(Dmat, 64);
#endif
    // The VR in Chiang 2016 is:
    //  (1 - \frac{ \sum_i w_i (Pol_{i,obs} - Pol_{i,est})^2 }
    //            { \sum_i w_i (Pol_{i,obs}^2) } )
    // Because Pol_{obs} and Pol_{est} take values of +1 or -1 we can
    // reduce the denominator so that the VR is
    //  (1 - \frac{ \sum_i w_i (Pol_{i,obs} - Pol_{i,est})^2 }
    //            { \sum_i w_i })
    // Furthermore, we can incorporate this term into the weighting
    // function s.t. \hat{\Sigma}_i = \Sigma_i/trace(\Sigma) to obtain
    //  (1 - \sum_i \hat{\Sigma}_i (Pol_{i,obs} - Pol_{i,est})^2 )
    // Finally, because we want likelihoods we want to rescale the
    // residual squared from [0,4] to [0,1]. To do this we multiply
    // the weight by 0.5 s.t. the residual [-2,2] goes to [-1,1] which,
    // when squared, fits in the desired bounds of [0,1]; i.e. multiply by
    // (1/2)^2 = 0.25
traceSigma = array_sum64f(nPolarity, Sigma, &ierr);
if (ierr != 0){traceSigma = 1.0;}
sqrtSigmaWt = memory_calloc64f(nPolarity);
#ifdef __INTEL_COMPILER
__assume_aligned(sqrtSigmaWt, ISCL_MEMORY_ALIGN);
#endif
for (i=0; i<nPolarity; i++){sqrtSigmaWt[i] = sqrt(0.25*Sigma[i]/traceSigma);}
#pragma omp parallel \
private (i, ic, ipol, jmt, kmt, Ncols, res, res2, U, Usign) \
shared (G, Dmat, mts, nmtBlocks, phi, sqrtSigmaWt) \
default (none) reduction(+:ierr)
{
U = memory_calloc64f(mblock*nPolarity);
res2 = memory_calloc64f(mblock);
Usign = memory_calloc64f(mblock*nPolarity);
#ifdef __INTEL_COMPILER
__assume_aligned(res2, ISCL_MEMORY_ALIGN);
__assume_aligned(Usign, ISCL_MEMORY_ALIGN);
#endif
#pragma omp for
for (kmt=0; kmt<nmtBlocks; kmt++)
{
jmt = kmt*blockSize;
Ncols = MIN(blockSize, nmt - jmt); // Number of columns of M
// Compute U = GM
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
Mrows, Ncols, Kcols, one, G, LDG,
&mts[ldm*jmt], ldm, zero, U, mblock);
// Make the theoretical polarity +1 or -1 to match the data
array_copysign64f_work(mblock*nPolarity, U, Usign);
memset(res2, 0, (size_t) mblock*sizeof(double));
// Compute the weighted residual
for (ipol=0; ipol<nPolarity; ipol++)
{
#pragma omp simd aligned(Dmat, Usign, res2: ISCL_MEMORY_ALIGN)
for (ic=0; ic<Ncols; ic++)
{
// Change residual range from [0,2] to [0,1] and weight
res = sqrtSigmaWt[ipol]*( Dmat[ipol*mblock+ic]
- Usign[ipol*mblock+ic]);
res2[ic] = res2[ic] + res*res;
}
}
// Compute the variance reduction and put it into objective function
for (ic=0; ic<Ncols; ic++)
{
phi[jmt+ic] = 1.0 - res2[ic];
}
} // Loop on moment tensor blocks
memory_free64f(&Usign);
memory_free64f(&U);
memory_free64f(&res2);
} // end parallel section
memory_free64f(&sqrtSigmaWt);
return 0;
}
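/*
 * Returns the number of trailing doubles required so that the next array
 * starts on a 64-byte boundary.  For example, n = 5 doubles occupies
 * 40 bytes; 40 % 64 = 40, so pad = (64 - 40)/8 = 3 doubles.
 */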
static int computePadding64f(const int n)
{
size_t mod, pad;
int ipad;
    // Compute how many doubles pad an n-element array to a 64-byte boundary
pad = 0;
mod = ((size_t) n*sizeof(double))%64;
if (mod != 0)
{
pad = (64 - mod)/sizeof(double);
}
ipad = (int) pad;
return ipad;
}
|
bt.c | /*
* This software is Copyright (c) 2015 Sayantan Datta <std2048 at gmail dot com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
* Based on paper 'Perfect Spatial Hashing' by Lefebvre & Hoppe
*/
#ifdef HAVE_OPENCL
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <signal.h>
#include <unistd.h>
#include "misc.h" // error()
#include "bt_twister.h"
#include "bt_hash_types.h"
#if _OPENMP > 201107
#define MAYBE_PARALLEL_FOR _Pragma("omp for")
#define MAYBE_ATOMIC_WRITE _Pragma("omp atomic write")
#define MAYBE_ATOMIC_CAPTURE _Pragma("omp atomic capture")
#else
#define MAYBE_PARALLEL_FOR _Pragma("omp single")
#define MAYBE_ATOMIC_WRITE
#define MAYBE_ATOMIC_CAPTURE
#endif
typedef struct {
/* List of indexes linked to offset_data_idx */
unsigned int *hash_location_list;
unsigned short collisions;
unsigned short iter;
unsigned int offset_table_idx;
} auxilliary_offset_data;
/* Interface pointers */
static unsigned int (*zero_check_ht)(unsigned int);
static void (*assign_ht)(unsigned int, unsigned int);
static void (*assign0_ht)(unsigned int);
static unsigned int (*calc_ht_idx)(unsigned int, unsigned int);
static unsigned int (*get_offset)(unsigned int, unsigned int);
static void (*allocate_ht)(unsigned int, unsigned int);
static int (*test_tables)(unsigned int, OFFSET_TABLE_WORD *, unsigned int, unsigned int, unsigned int, unsigned int);
static unsigned int (*remove_duplicates)(unsigned int, unsigned int, unsigned int);
static void *loaded_hashes;
static unsigned int hash_type = 0;
static unsigned int binary_size_actual = 0;
static unsigned int num_loaded_hashes = 0;
unsigned int hash_table_size = 0, shift64_ht_sz = 0, shift128_ht_sz = 0;
static OFFSET_TABLE_WORD *offset_table = NULL;
static unsigned int offset_table_size = 0, shift64_ot_sz = 0, shift128_ot_sz = 0;
static auxilliary_offset_data *offset_data = NULL;
unsigned long long total_memory_in_bytes = 0;
static volatile sig_atomic_t signal_stop = 0;
static unsigned int verbosity;
static void alarm_handler(int sig)
{
if (sig == SIGALRM)
signal_stop = 1;
}
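/* Euclid's algorithm: returns gcd(m, n).  The offset and hash table sizes
   are coprime iff this returns 1, which init_tables() enforces below. */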
static unsigned int coprime_check(unsigned int m, unsigned int n)
{
unsigned int rem;
while (n != 0) {
rem = m % n;
m = n;
n = rem;
}
return m;
}
static void release_all_lists()
{
unsigned int i;
for (i = 0; i < offset_table_size; i++)
bt_free((void **)&(offset_data[i].hash_location_list));
}
int bt_malloc(void **ptr, size_t size)
{
*ptr = mem_alloc(size);
if (*ptr || !size)
return 0;
return 1;
}
int bt_calloc(void **ptr, size_t num, size_t size)
{
*ptr = mem_calloc(num, size);
if (*ptr || !num)
return 0;
return 1;
}
int bt_memalign_alloc(void **ptr, size_t alignment, size_t size)
{
*ptr = mem_alloc_align(size, alignment);
if (*ptr || !size)
return 0;
return 1;
}
void bt_free(void **ptr)
{
MEM_FREE((*ptr));
*ptr = NULL;
}
void bt_error_fn(const char *str, char *file, int line)
{
fprintf(stderr, "%s in file:%s, line:%d.\n", str, file, line);
error();
}
void bt_warn_fn(const char *str, char *file, int line)
{
fprintf(stderr, "%s in file:%s, line:%d.\n", str, file, line);
}
static unsigned int modulo_op(void * hash, unsigned int N, uint64_t shift64, uint64_t shift128)
{
if (hash_type == 64)
return modulo64_31b(*(uint64_t *)hash, N);
else if (hash_type == 128)
return modulo128_31b(*(uint128_t *)hash, N, shift64);
else if (hash_type == 192)
return modulo192_31b(*(uint192_t *)hash, N, shift64, shift128);
else
fprintf(stderr, "modulo op error\n");
return 0;
}
/* Exploits the fact that sorting within a bucket is not essential. */
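/* Counting-sort pass: offset_data is permuted in place into descending
   order of collision count.  A histogram over (num_buckets - collisions)
   yields, via its prefix sum, each bucket's first slot; entries already
   inside their bucket's range are skipped, others are swapped into the
   next free slot of their bucket. */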
static void in_place_bucket_sort(unsigned int num_buckets)
{
unsigned int *histogram;
unsigned int *histogram_empty;
unsigned int *prefix_sum;
unsigned int i;
if (bt_calloc((void **)&histogram, num_buckets + 1, sizeof(unsigned int)))
bt_error("Failed to allocate memory: histogram.");
if (bt_calloc((void **)&histogram_empty, num_buckets + 1, sizeof(unsigned int)))
bt_error("Failed to allocate memory: histogram_empty.");
if (bt_calloc((void **)&prefix_sum, num_buckets + 10, sizeof(unsigned int)))
bt_error("Failed to allocate memory: prefix_sum.");
i = 0;
while (i < offset_table_size)
histogram[num_buckets - offset_data[i++].collisions]++;
for (i = 1; i <= num_buckets; i++)
prefix_sum[i] = prefix_sum[i - 1] + histogram[i - 1];
i = 0;
while (i < prefix_sum[num_buckets]) {
unsigned int histogram_index = num_buckets - offset_data[i].collisions;
if (i >= prefix_sum[histogram_index] &&
histogram_index < num_buckets &&
i < prefix_sum[histogram_index + 1]) {
histogram_empty[histogram_index]++;
i++;
}
else {
auxilliary_offset_data tmp;
unsigned int swap_index = prefix_sum[histogram_index] + histogram_empty[histogram_index];
histogram_empty[histogram_index]++;
tmp = offset_data[i];
offset_data[i] = offset_data[swap_index];
offset_data[swap_index] = tmp;
}
}
bt_free((void **)&histogram);
bt_free((void **)&histogram_empty);
bt_free((void **)&prefix_sum);
}
static void init_tables(unsigned int approx_offset_table_sz, unsigned int approx_hash_table_sz)
{
unsigned int i, max_collisions, offset_data_idx;
uint64_t shift128;
if (verbosity > 1)
fprintf(stdout, "\nInitialing Tables...");
total_memory_in_bytes = 0;
approx_hash_table_sz |= 1;
	/* Repeat until the two sizes are coprime */
while (coprime_check(approx_offset_table_sz, approx_hash_table_sz) != 1)
approx_offset_table_sz++;
offset_table_size = approx_offset_table_sz;
hash_table_size = approx_hash_table_sz;
if (hash_table_size > 0x7fffffff || offset_table_size > 0x7fffffff)
bt_error("Reduce the number of loaded hashes to < 0x7fffffff.");
shift64_ht_sz = (((1ULL << 63) % hash_table_size) * 2) % hash_table_size;
shift64_ot_sz = (((1ULL << 63) % offset_table_size) * 2) % offset_table_size;
shift128 = (uint64_t)shift64_ht_sz * shift64_ht_sz;
shift128_ht_sz = shift128 % hash_table_size;
shift128 = (uint64_t)shift64_ot_sz * shift64_ot_sz;
shift128_ot_sz = shift128 % offset_table_size;
if (bt_malloc((void **)&offset_table, offset_table_size * sizeof(OFFSET_TABLE_WORD)))
bt_error("Failed to allocate memory: offset_table.");
total_memory_in_bytes += offset_table_size * sizeof(OFFSET_TABLE_WORD);
if (bt_malloc((void **)&offset_data, offset_table_size * sizeof(auxilliary_offset_data)))
bt_error("Failed to allocate memory: offset_data.");
total_memory_in_bytes += offset_table_size * sizeof(auxilliary_offset_data);
max_collisions = 0;
#if _OPENMP
#pragma omp parallel private(i, offset_data_idx)
#endif
{
#if _OPENMP
#pragma omp for
#endif
for (i = 0; i < offset_table_size; i++) {
//memset(&offset_data[i], 0, sizeof(auxilliary_offset_data));
offset_data[i].offset_table_idx = 0;
offset_data[i].collisions = 0;
offset_data[i].hash_location_list = NULL;
offset_data[i].iter = 0;
offset_table[i] = 0;
}
#if _OPENMP
#pragma omp barrier
#endif
/* Build Auxiliary data structure for offset_table. */
#if _OPENMP
#pragma omp for
#endif
for (i = 0; i < num_loaded_hashes; i++) {
offset_data_idx = modulo_op(loaded_hashes + i * binary_size_actual, offset_table_size, shift64_ot_sz, shift128_ot_sz);
#if _OPENMP
#pragma omp atomic
#endif
offset_data[offset_data_idx].collisions++;
}
#if _OPENMP
#pragma omp barrier
#pragma omp single
#endif
for (i = 0; i < offset_table_size; i++)
if (offset_data[i].collisions) {
if (bt_malloc((void **)&offset_data[i].hash_location_list, offset_data[i].collisions * sizeof(unsigned int)))
bt_error("Failed to allocate memory: offset_data[i].hash_location_list.");
if (offset_data[i].collisions > max_collisions)
max_collisions = offset_data[i].collisions;
}
#if _OPENMP
#pragma omp barrier
MAYBE_PARALLEL_FOR
#endif
for (i = 0; i < num_loaded_hashes; i++) {
unsigned int iter;
offset_data_idx = modulo_op(loaded_hashes + i * binary_size_actual, offset_table_size, shift64_ot_sz, shift128_ot_sz);
#if _OPENMP
MAYBE_ATOMIC_WRITE
#endif
offset_data[offset_data_idx].offset_table_idx = offset_data_idx;
#if _OPENMP
MAYBE_ATOMIC_CAPTURE
#endif
iter = offset_data[offset_data_idx].iter++;
offset_data[offset_data_idx].hash_location_list[iter] = i;
}
#if _OPENMP
#pragma omp barrier
#endif
}
total_memory_in_bytes += num_loaded_hashes * sizeof(unsigned int);
//qsort((void *)offset_data, offset_table_size, sizeof(auxilliary_offset_data), qsort_compare);
in_place_bucket_sort(max_collisions);
if (verbosity > 1)
fprintf(stdout, "Done\n");
allocate_ht(num_loaded_hashes, verbosity);
if (verbosity > 2) {
fprintf(stdout, "Offset Table Size %Lf %% of Number of Loaded Hashes.\n", ((long double)offset_table_size / (long double)num_loaded_hashes) * 100.00);
fprintf(stdout, "Offset Table Size(in GBs):%Lf\n", ((long double)offset_table_size * sizeof(OFFSET_TABLE_WORD)) / ((long double)1024 * 1024 * 1024));
fprintf(stdout, "Offset Table Aux Data Size(in GBs):%Lf\n", ((long double)offset_table_size * sizeof(auxilliary_offset_data)) / ((long double)1024 * 1024 * 1024));
fprintf(stdout, "Offset Table Aux List Size(in GBs):%Lf\n", ((long double)num_loaded_hashes * sizeof(unsigned int)) / ((long double)1024 * 1024 * 1024));
for (i = 0; i < offset_table_size && offset_data[i].collisions; i++)
;
fprintf (stdout, "Unused Slots in Offset Table:%Lf %%\n", 100.00 * (long double)(offset_table_size - i) / (long double)(offset_table_size));
fprintf(stdout, "Total Memory Use(in GBs):%Lf\n", ((long double)total_memory_in_bytes) / ((long double) 1024 * 1024 * 1024));
}
}
static unsigned int check_n_insert_into_hash_table(unsigned int offset, auxilliary_offset_data * ptr, unsigned int *hash_table_idxs, unsigned int *store_hash_modulo_table_sz)
{
unsigned int i;
i = 0;
while (i < ptr -> collisions) {
hash_table_idxs[i] = store_hash_modulo_table_sz[i] + offset;
if (hash_table_idxs[i] >= hash_table_size)
hash_table_idxs[i] -= hash_table_size;
if (zero_check_ht(hash_table_idxs[i++]))
return 0;
}
i = 0;
while (i < ptr -> collisions) {
if (zero_check_ht(hash_table_idxs[i])) {
unsigned int j = 0;
while (j < i)
assign0_ht(hash_table_idxs[j++]);
return 0;
}
assign_ht(hash_table_idxs[i], ptr -> hash_location_list[i]);
i++;
}
return 1;
}
static void calc_hash_modulo_table_size(unsigned int *store, auxilliary_offset_data * ptr) {
unsigned int i = 0;
while (i < ptr -> collisions) {
store[i] = modulo_op(loaded_hashes + (ptr -> hash_location_list[i]) * binary_size_actual, hash_table_size, shift64_ht_sz, shift128_ht_sz);
i++;
}
}
static unsigned int create_tables()
{
unsigned int i;
unsigned int bitmap = ((1ULL << (sizeof(OFFSET_TABLE_WORD) * 8)) - 1) & 0xFFFFFFFF;
unsigned int limit = bitmap % hash_table_size + 1;
unsigned int hash_table_idx;
unsigned int *store_hash_modulo_table_sz;
unsigned int *hash_table_idxs;
#ifdef ENABLE_BACKTRACKING
OFFSET_TABLE_WORD last_offset;
unsigned int backtracking = 0;
#endif
unsigned int trigger;
long double done = 0;
struct timeval t;
if (bt_malloc((void **)&store_hash_modulo_table_sz, offset_data[0].collisions * sizeof(unsigned int)))
bt_error("Failed to allocate memory: store_hash_modulo_table_sz.");
if (bt_malloc((void **)&hash_table_idxs, offset_data[0].collisions * sizeof(unsigned int)))
bt_error("Failed to allocate memory: hash_table_idxs.");
gettimeofday(&t, NULL);
seedMT(t.tv_sec + t.tv_usec);
i = 0;
trigger = 0;
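	/* Perfect-hash construction (after Lefebvre & Hoppe): process offset
	   table slots in descending collision order; for each slot, linearly
	   probe candidate offsets until every hash in its list lands in an
	   empty hash table cell, giving up after `limit` attempts. */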
while (offset_data[i].collisions > 1) {
OFFSET_TABLE_WORD offset;
unsigned int num_iter;
done += offset_data[i].collisions;
		calc_hash_modulo_table_size(store_hash_modulo_table_sz, &offset_data[i]);
offset = (OFFSET_TABLE_WORD)(randomMT() & bitmap) % hash_table_size;
#ifdef ENABLE_BACKTRACKING
if (backtracking) {
offset = (last_offset + 1) % hash_table_size;
backtracking = 0;
}
#endif
alarm(3);
num_iter = 0;
while (!check_n_insert_into_hash_table((unsigned int)offset, &offset_data[i], hash_table_idxs, store_hash_modulo_table_sz) && num_iter < limit) {
offset++;
if (offset >= hash_table_size) offset = 0;
num_iter++;
}
offset_table[offset_data[i].offset_table_idx] = offset;
if ((trigger & 0xffff) == 0) {
trigger = 0;
if (verbosity > 0) {
fprintf(stdout, "\rProgress:%Lf %%, Number of collisions:%u", done / (long double)num_loaded_hashes * 100.00, offset_data[i].collisions);
fflush(stdout);
}
alarm(0);
}
if (signal_stop) {
alarm(0);
signal_stop = 0;
fprintf(stderr, "\nProgress is too slow!! trying next table size.\n");
bt_free((void **)&hash_table_idxs);
bt_free((void **)&store_hash_modulo_table_sz);
return 0;
}
trigger++;
if (num_iter == limit) {
#ifdef ENABLE_BACKTRACKING
if (num_loaded_hashes > 1000000) {
unsigned int j, backtrack_steps, iter;
done -= offset_data[i].collisions;
offset_table[offset_data[i].offset_table_idx] = 0;
backtrack_steps = 1;
j = 1;
while (j <= backtrack_steps && (int)(i - j) >= 0) {
last_offset = offset_table[offset_data[i - j].offset_table_idx];
iter = 0;
while (iter < offset_data[i - j].collisions) {
hash_table_idx =
calc_ht_idx(offset_data[i - j].hash_location_list[iter],
last_offset);
assign0_ht(hash_table_idx);
iter++;
}
offset_table[offset_data[i - j].offset_table_idx] = 0;
done -= offset_data[i - j].collisions;
j++;
}
i -= (j - 1);
backtracking = 1;
continue;
}
#endif
bt_free((void **)&hash_table_idxs);
bt_free((void **)&store_hash_modulo_table_sz);
return 0;
}
i++;
}
alarm(0);
hash_table_idx = 0;
while (offset_data[i].collisions > 0) {
done++;
while (hash_table_idx < hash_table_size) {
if (!zero_check_ht(hash_table_idx)) {
assign_ht(hash_table_idx, offset_data[i].hash_location_list[0]);
break;
}
hash_table_idx++;
}
offset_table[offset_data[i].offset_table_idx] = get_offset(hash_table_idx, offset_data[i].hash_location_list[0]);
if ((trigger & 0xffff) == 0) {
trigger = 0;
if (verbosity > 0) {
fprintf(stdout, "\rProgress:%Lf %%, Number of collisions:%u", done / (long double)num_loaded_hashes * 100.00, offset_data[i].collisions);
fflush(stdout);
}
}
trigger++;
i++;
}
bt_free((void **)&hash_table_idxs);
bt_free((void **)&store_hash_modulo_table_sz);
return 1;
}
static unsigned int next_prime(unsigned int num)
{
if (num == 1)
return 2;
else if (num == 2)
return 3;
else if (num == 3 || num == 4)
return 5;
else if (num == 5 || num == 6)
return 7;
else if (num >= 7 && num <= 9)
return 1;
/* else if (num == 11 || num == 12)
return 13;
else if (num >= 13 && num < 17)
return 17;
else if (num == 17 || num == 18)
return 19;
else if (num >= 19 && num < 23)
return 23;
else if (num >= 23 && num < 29)
return 29;
else if (num == 29 || num == 30 )
return 31;
else if (num >= 31 && num < 37)
return 37;
else if (num >= 37 && num < 41)
return 41;
else if (num == 41 || num == 42 )
return 43;
else if (num >= 43 && num < 47)
return 47;
else if (num >= 47 && num < 53)
return 53;
else if (num >= 53 && num < 59)
return 59;
else if (num == 59 || num == 60)
return 61;
else if (num >= 61 && num < 67)
return 67;
else if (num >= 67 && num < 71)
return 71;
else if (num == 71 || num == 72)
return 73;
else if (num >= 73 && num < 79)
return 79;
else if (num >= 79 && num < 83)
return 83;
else if (num >= 83 && num < 89)
return 89;
else if (num >= 89 && num < 97)
return 97;
else
return 1;*/
return 1;
}
unsigned int create_perfect_hash_table(int htype, void *loaded_hashes_ptr,
unsigned int num_ld_hashes,
OFFSET_TABLE_WORD **offset_table_ptr,
unsigned int *offset_table_sz_ptr,
unsigned int *hash_table_sz_ptr,
unsigned int verb)
{
long double multiplier_ht, multiplier_ot, inc_ht, inc_ot;
unsigned int approx_hash_table_sz, approx_offset_table_sz, i, dupe_remove_ht_sz;
struct sigaction new_action, old_action;
struct itimerval old_it;
total_memory_in_bytes = 0;
hash_type = htype;
loaded_hashes = loaded_hashes_ptr;
verbosity = verb;
if (hash_type == 64) {
zero_check_ht = zero_check_ht_64;
assign_ht = assign_ht_64;
assign0_ht = assign0_ht_64;
calc_ht_idx = calc_ht_idx_64;
get_offset = get_offset_64;
allocate_ht = allocate_ht_64;
test_tables = test_tables_64;
remove_duplicates = remove_duplicates_64;
loaded_hashes_64 = (uint64_t *)loaded_hashes;
binary_size_actual = 8;
if (verbosity > 1)
fprintf(stdout, "Using Hash type 64.\n");
}
else if (hash_type == 128) {
zero_check_ht = zero_check_ht_128;
assign_ht = assign_ht_128;
assign0_ht = assign0_ht_128;
calc_ht_idx = calc_ht_idx_128;
get_offset = get_offset_128;
allocate_ht = allocate_ht_128;
test_tables = test_tables_128;
remove_duplicates = remove_duplicates_128;
loaded_hashes_128 = (uint128_t *)loaded_hashes;
binary_size_actual = 16;
if (verbosity > 1)
fprintf(stdout, "Using Hash type 128.\n");
}
else if (hash_type == 192) {
zero_check_ht = zero_check_ht_192;
assign_ht = assign_ht_192;
assign0_ht = assign0_ht_192;
calc_ht_idx = calc_ht_idx_192;
get_offset = get_offset_192;
allocate_ht = allocate_ht_192;
test_tables = test_tables_192;
remove_duplicates = remove_duplicates_192;
loaded_hashes_192 = (uint192_t *)loaded_hashes;
binary_size_actual = 24;
if (verbosity > 1)
fprintf(stdout, "Using Hash type 192.\n");
}
new_action.sa_handler = alarm_handler;
sigemptyset(&new_action.sa_mask);
new_action.sa_flags = 0;
if (sigaction(SIGALRM, NULL, &old_action) < 0)
bt_error("Error retriving signal info.");
if (sigaction(SIGALRM, &new_action, NULL) < 0)
bt_error("Error setting new signal handler.");
if (getitimer(ITIMER_REAL, &old_it) < 0)
bt_error("Error retriving timer info.");
inc_ht = 0.005;
inc_ot = 0.05;
if (num_ld_hashes <= 100) {
multiplier_ot = 1.501375173;
inc_ht = 0.05;
inc_ot = 0.5;
dupe_remove_ht_sz = 128;
}
else if (num_ld_hashes <= 1000) {
multiplier_ot = 1.101375173;
dupe_remove_ht_sz = 1024;
}
else if (num_ld_hashes <= 10000) {
multiplier_ot = 1.151375173;
dupe_remove_ht_sz = 16384;
}
else if (num_ld_hashes <= 100000) {
multiplier_ot = 1.20375173;
dupe_remove_ht_sz = 131072;
}
else if (num_ld_hashes <= 1000000) {
multiplier_ot = 1.25375173;
dupe_remove_ht_sz = 1048576;
}
else if (num_ld_hashes <= 10000000) {
multiplier_ot = 1.31375173;
dupe_remove_ht_sz = 16777216;
}
else if (num_ld_hashes <= 20000000) {
multiplier_ot = 1.35375173;
dupe_remove_ht_sz = 33554432;
}
else if (num_ld_hashes <= 50000000) {
multiplier_ot = 1.41375173;
dupe_remove_ht_sz = 67108864;
}
else if (num_ld_hashes <= 110000000) {
multiplier_ot = 1.51375173;
dupe_remove_ht_sz = 134217728;
}
else if (num_ld_hashes <= 200000000) {
multiplier_ot = 1.61375173;
dupe_remove_ht_sz = 134217728 * 2;
}
else {
fprintf(stderr, "This many number of hashes have never been tested before and might not succeed!!\n");
multiplier_ot = 3.01375173;
dupe_remove_ht_sz = 134217728 * 4;
}
num_loaded_hashes = remove_duplicates(num_ld_hashes, dupe_remove_ht_sz, verbosity);
if (!num_loaded_hashes)
bt_error("Failed to remove duplicates.");
multiplier_ht = 1.001097317;
approx_offset_table_sz = (((long double)num_loaded_hashes / 4.0) * multiplier_ot + 10.00);
approx_hash_table_sz = ((long double)num_loaded_hashes * multiplier_ht);
i = 0;
do {
unsigned int temp;
init_tables(approx_offset_table_sz, approx_hash_table_sz);
if (create_tables()) {
if (verbosity > 0)
fprintf(stdout, "\n");
break;
}
if (verbosity > 0)
fprintf(stdout, "\n");
release_all_lists();
bt_free((void **)&offset_data);
bt_free((void **)&offset_table);
if (hash_type == 64)
bt_free((void **)&hash_table_64);
else if (hash_type == 128)
bt_free((void **)&hash_table_128);
else if (hash_type == 192)
bt_free((void **)&hash_table_192);
temp = next_prime(approx_offset_table_sz % 10);
approx_offset_table_sz /= 10;
approx_offset_table_sz *= 10;
approx_offset_table_sz += temp;
i++;
if (!(i % 5)) {
multiplier_ot += inc_ot;
multiplier_ht += inc_ht;
approx_offset_table_sz = (((long double)num_loaded_hashes / 4.0) * multiplier_ot + 10.00);
approx_hash_table_sz = ((long double)num_loaded_hashes * multiplier_ht);
}
} while(1);
release_all_lists();
bt_free((void **)&offset_data);
*offset_table_ptr = offset_table;
*hash_table_sz_ptr = hash_table_size;
*offset_table_sz_ptr = offset_table_size;
if (sigaction(SIGALRM, &old_action, NULL) < 0)
bt_error("Error restoring previous signal handler.");
if (setitimer(ITIMER_REAL, &old_it, NULL) < 0)
bt_error("Error restoring previous timer.");
if (!test_tables(num_loaded_hashes, offset_table, offset_table_size, shift64_ot_sz, shift128_ot_sz, verbosity))
return 0;
return num_loaded_hashes;
}
/*static int qsort_compare(const void *p1, const void *p2)
{
auxilliary_offset_data *a = (auxilliary_offset_data *)p1;
auxilliary_offset_data *b = (auxilliary_offset_data *)p2;
if (a[0].collisions > b[0].collisions) return -1;
if (a[0].collisions == b[0].collisions) return 0;
return 1;
}*/
#endif
|
be9354d76c55.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
#include <stdio.h>
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
struct dataobj
{
void *restrict data;
int *size;
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
struct profiler
{
double section0;
double section1;
double section2;
};
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r47_vec, float **restrict r48_vec, const int time, const int tw);
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*r17)[y_size + 1][z_size + 1];
posix_memalign((void **)&r17, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r18)[y_size + 1][z_size + 1];
posix_memalign((void **)&r18, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r19)[y_size + 1][z_size + 1];
posix_memalign((void **)&r19, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r20)[y_size + 1][z_size + 1];
posix_memalign((void **)&r20, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r21)[y_size + 1][z_size + 1];
posix_memalign((void **)&r21, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float **r47;
posix_memalign((void **)&r47, 64, sizeof(float *) * nthreads);
float **r48;
posix_memalign((void **)&r48, 64, sizeof(float *) * nthreads);
int y0_blk0_size = block_sizes[3];
int x0_blk0_size = block_sizes[2];
int yb_size = block_sizes[1];
int xb_size = block_sizes[0];
int sf = 2;
int t_blk_size = 2 * sf * (time_M - time_m);
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
posix_memalign((void **)&r47[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
posix_memalign((void **)&r48[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
}
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(2) schedule(static, 1)
for (int x = x_m - 1; x <= x_M; x += 1)
{
for (int y = y_m - 1; y <= y_M; y += 1)
{
#pragma omp simd aligned(delta, phi, theta : 64)
for (int z = z_m - 1; z <= z_M; z += 1)
{
r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]);
r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]);
r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]);
r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]);
r17[x + 1][y + 1][z + 1] = sqrt(2 * delta[x + 4][y + 4][z + 4] + 1);
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);
for (int t_blk = time_m; t_blk <= 1 + sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block
{
for (int xb = x_m - 1; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size)
{
//printf(" Change of outer xblock %d \n", xb);
for (int yb = y_m - 1; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size)
{
for (int time = t_blk, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= 2 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time / sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 2) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3))
{
int tw = ((time / sf) % (time_M - time_m + 1));
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
bf0(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x0_blk0_size, x_size, y0_blk0_size, y_size, z_size, t0, t1, t2, x_M, x_m, y_M, y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, (float **)r47, (float **)r48, time, tw);
// x_M - (x_M - x_m + 1)%(x0_blk0_size), x_m, y_M - (y_M - y_m + 1)%(y0_blk0_size), y_m,
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
}
}
}
}
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
free(r47[tid]);
free(r48[tid]);
}
free(r17);
free(r18);
free(r19);
free(r20);
free(r21);
free(r47);
free(r48);
return 0;
}
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, float **restrict r47_vec, float **restrict r48_vec, const int time, const int tw)
{
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data;
float(*restrict r17)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r17_vec;
float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
float **r47 = (float **)r47_vec;
float **r48 = (float **)r48_vec;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
float(*restrict r34)[y0_blk0_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 1][z_size + 1]) r47[tid];
float(*restrict r35)[y0_blk0_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 1][z_size + 1]) r48[tid];
#pragma omp for collapse(2) schedule(dynamic, 1)
for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
{
for (int x = x0_blk0 - 1, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++)
{
for (int y = y0_blk0 - 1, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++)
{
//printf(" bf0 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 4, y - time + 4, xs, ys);
#pragma omp simd aligned(u, v : 64)
for (int z = z_m - 1; z <= z_M; z += 1)
{
float r39 = -u[t0][x - time + 4][y - time + 4][z + 4];
r34[xs][ys][z + 1] = 1.0e-1F * (-(r39 + u[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r39 + u[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r39 + u[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
float r40 = -v[t0][x - time + 4][y - time + 4][z + 4];
r35[xs][ys][z + 1] = 1.0e-1F * (-(r40 + v[t0][x - time + 4][y - time + 4][z + 5]) * r18[x - time + 1][y - time + 1][z + 1] - (r40 + v[t0][x - time + 4][y - time + 5][z + 4]) * r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] - (r40 + v[t0][x - time + 5][y - time + 4][z + 4]) * r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1]);
}
}
}
for (int x = x0_blk0, xs = 0; x <= min(min((x_M + time), (xb + xb_size - 1)), (x0_blk0 + x0_blk0_size - 1)); x++, xs++)
{
for (int y = y0_blk0, ys = 0; y <= min(min((y_M + time), (yb + yb_size - 1)), (y0_blk0 + y0_blk0_size - 1)); y++, ys++)
{
//printf(" bf1 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", tw, x - time + 4, y - time + 4, xs, ys);
#pragma omp simd aligned(damp, epsilon, u, v, vp : 64)
for (int z = z_m; z <= z_M; z += 1)
{
float r46 = 1.0 / dt;
float r45 = 1.0 / (dt * dt);
float r44 = r18[x - time + 1][y - time + 1][z] * r35[xs + 1][ys + 1][z] - r18[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1] + r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r35[xs + 1][ys][z + 1] - r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1] + r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r35[xs][ys + 1][z + 1] - r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r35[xs + 1][ys + 1][z + 1];
float r43 = 1.0 / (vp[x - time + 4][y - time + 4][z + 4] * vp[x - time + 4][y - time + 4][z + 4]);
float r42 = 1.0e-1F * (-r18[x - time + 1][y - time + 1][z] * r34[xs + 1][ys + 1][z] + r18[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1] - r19[x - time + 1][y - time][z + 1] * r20[x - time + 1][y - time][z + 1] * r34[xs + 1][ys][z + 1] + r19[x - time + 1][y - time + 1][z + 1] * r20[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1] - r20[x - time][y - time + 1][z + 1] * r21[x - time][y - time + 1][z + 1] * r34[xs][ys + 1][z + 1] + r20[x - time + 1][y - time + 1][z + 1] * r21[x - time + 1][y - time + 1][z + 1] * r34[xs + 1][ys + 1][z + 1]) - 8.33333315e-4F * (u[t0][x - time + 2][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 2][z + 4] + u[t0][x - time + 4][y - time + 4][z + 2] + u[t0][x - time + 4][y - time + 4][z + 6] + u[t0][x - time + 4][y - time + 6][z + 4] + u[t0][x - time + 6][y - time + 4][z + 4]) + 1.3333333e-2F * (u[t0][x - time + 3][y - time + 4][z + 4] + u[t0][x - time + 4][y - time + 3][z + 4] + u[t0][x - time + 4][y - time + 4][z + 3] + u[t0][x - time + 4][y - time + 4][z + 5] + u[t0][x - time + 4][y - time + 5][z + 4] + u[t0][x - time + 5][y - time + 4][z + 4]) - 7.49999983e-2F * u[t0][x - time + 4][y - time + 4][z + 4];
float r41 = 1.0 / (r43 * r45 + r46 * damp[x - time + 1][y - time + 1][z + 1]);
float r32 = r45 * (-2.0F * u[t0][x - time + 4][y - time + 4][z + 4] + u[t1][x - time + 4][y - time + 4][z + 4]);
float r33 = r45 * (-2.0F * v[t0][x - time + 4][y - time + 4][z + 4] + v[t1][x - time + 4][y - time + 4][z + 4]);
u[t2][x - time + 4][y - time + 4][z + 4] = r41 * ((-r32) * r43 + r42 * (2 * epsilon[x - time + 4][y - time + 4][z + 4] + 1) + 1.0e-1F * r44 * r17[x - time + 1][y - time + 1][z + 1] + r46 * (damp[x - time + 1][y - time + 1][z + 1] * u[t0][x - time + 4][y - time + 4][z + 4]));
v[t2][x - time + 4][y - time + 4][z + 4] = r41 * ((-r33) * r43 + r42 * r17[x - time + 1][y - time + 1][z + 1] + 1.0e-1F * r44 + r46 * (damp[x - time + 1][y - time + 1][z + 1] * v[t0][x - time + 4][y - time + 4][z + 4]));
}
int sp_zi_M = nnz_sp_source_mask[x - time][y - time] - 1;
for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1)
{
int zind = sp_source_mask[x - time][y - time][sp_zi];
float r22 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
u[t2][x - time + 4][y - time + 4][zind + 4] += r22;
float r23 = save_src_v[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
v[t2][x - time + 4][y - time + 4][zind + 4] += r23;
//printf("Source injection at time %d , at : x: %d, y: %d, %d, %f, %f \n", tw, x - time + 4, y - time + 4, zind + 4, r22, r23);
}
}
}
}
}
}
}
|
constant_density_acoustic_time_scalar_2D_4.h | #ifndef __CDA_TIME_SCALAR_2D_4__
#define __CDA_TIME_SCALAR_2D_4__
#include <stdlib.h>
template< typename T, int ACCURACY >
void cda_time_scalar_2D_4( T* km1_u, int nr_km1_u, int nc_km1_u, // in - padded wavefield shape
T* k_Phix, int nr_k_Phix, int nc_k_Phix, // in - padded wavefield shape
T* k_Phiz, int nr_k_Phiz, int nc_k_Phiz, // in - padded wavefield shape
T* k_u, int nr_k_u, int nc_k_u, // in - padded wavefield shape
T* C, int nr_C, int nc_C, // in - padded wavefield shape
T* rhs, int nr_rhs, int nc_rhs, // in - padded wavefield shape
T* xlpml, int n_xlpml, // in - length is the number of nodes inside the padding that the pml value is defined.
T* xrpml, int n_xrpml, // in - length is the number of nodes inside the padding that the pml value is defined.
T* zlpml, int n_zlpml, // in - length is the number of nodes inside the padding that the pml value is defined.
T* zrpml, int n_zrpml, // in - length is the number of nodes inside the padding that the pml value is defined.
double const& dt, // in
double const& dx, // in
double const& dz, // in
int const& nx, // in
int const& nz, // in
T* kp1_Phix, int nr_kp1_Phix, int nc_kp1_Phix, // out
T* kp1_Phiz, int nr_kp1_Phiz, int nc_kp1_Phiz, // out
T* kp1_u, int nr_kp1_u, int nc_kp1_u ) // out
{
enum {MAX_FD_SHIFT = ACCURACY/2};
T lapU = 0.0;
// PML variable
T sigmax = 0.0;
T sigmaz = 0.0;
// Time delta variables
T dt2 = dt*dt;
// Loop/index variables
int idx;
int zstride = 1;
int xstride = nz;
int s = zstride;
int i, k;
// shared space step square variable
T dx2 = dx*dx;
T dz2 = dz*dz;
// private variables
//non derivatives
T fac1;
T fac2;
//derivatives
T dux , duz;
T dPhix, dPhiz;
    // Guard against OMP_NUM_THREADS being unset or invalid: fall back to one thread.
    char* NUM = getenv("OMP_NUM_THREADS");
    int Num_Th = (NUM != NULL && atoi(NUM) > 0) ? atoi(NUM) : 1;
#pragma omp parallel for private(sigmaz, sigmax, i, k, idx, dux, duz, dPhix, dPhiz, lapU, fac1, fac2) shared(dx, dx2, dz, dz2, nz, nx, kp1_Phix, kp1_Phiz, k_Phix, k_Phiz, n_zrpml, n_zlpml, n_xrpml, xrpml, xlpml, zrpml, zlpml, s, rhs, C, dt, dt2, km1_u, k_u, kp1_u) num_threads(Num_Th) collapse(2)
for(i=0; i < nx; ++i)
{
for(k=0; k < nz; k++)
{
idx = i*xstride + k;
kp1_Phix[idx] = 0.0;
kp1_Phiz[idx] = 0.0;
kp1_u[idx] = 0.0;
// This handles homogeneous Dirichlet BCs and non-updating in ghost regions.
if ((i == 0) || (i == nx-1)) continue;
if ((k == 0) || (k == nz-1)) continue;
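            // Note: given the two `continue` guards above, the i==0,
            // i==nx-1, k==0 and k==nz-1 branches below are never taken;
            // only the one-point-from-boundary (i==1, i==nx-2, k==1,
            // k==nz-2) decentered stencils remain reachable.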
lapU = 0.0;
// Do the X direction
// Left side
if (i==0)
{
//decentered derivative 2 ranks on the right
dux = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_u[idx+nz]+(-1./12.)*k_u[idx+2*nz])/dx;
dPhix = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_Phix[idx+nz]+(-1./12.)*k_Phix[idx+2*nz])/ dx;
lapU += ((-1./12.)*0.0+(4./3.)*0.0+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+nz]+(-1./12.)*k_u[idx+2*nz])/ dx2;
}
else if (i == 1)
{
//decentered derivative 1 rank on the right
dux = ((1./12.)*0.0 +(-2./3.)*k_u[idx-nz]+0.0+(2./3.)*k_u[idx+nz]+(-1./12.)*k_u[idx+2*nz])/ dx;
dPhix = ((1./12.)*0.0+(-2./3.)*k_Phix[idx-nz]+0.0+(2./3.)*k_Phix[idx+nz]+(-1./12.)*k_Phix[idx+2*nz])/ dx;
lapU += ((-1./12.)*0.0+(4./3.)*k_u[idx-nz]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+nz]+(-1./12.)*k_u[idx+2*nz])/ dx2;
// Right side
}
else if (i == nx-1)
{
//decentered derivative 2 ranks on the left
dux = ((1./12.)*k_u[idx-2*nz]+(-2./3.)*k_u[idx-nz]+0.0+(2./3.)*0.0 +(-1./12.)*0.0)/ dx;
dPhix = ((1./12.)*k_Phix[idx-2*nz]+(-2./3.)*k_Phix[idx-nz]+0.0+(2./3.)*0.0+(-1./12.)*0.0) / dx;
lapU += ((-1./12.)*k_u[idx-2*nz]+(4./3.)*k_u[idx-nz]+(-5./2.)*k_u[idx]+(4./3.)*0.0+(-1./12.)*0.0)/ dx2;
}
else if (i == nx-2)
{
//decentered derivative 1 ranks on the left
dux = ((1./12.)*k_u[idx-2*nz]+(-2./3.)*k_u[idx-nz]+0.0+(2./3.)*k_u[idx+nz]+(-1./12.)*0.0)/ dx;
dPhix = ((1./12.)*k_Phix[idx-2*nz]+(-2./3.)*k_Phix[idx-nz]+0.0+(2./3.)*k_Phix[idx+nz]+(-1./12.)*0.0)/ dx;
lapU += ((-1./12.)*k_u[idx-2*nz]+(4./3.)*k_u[idx-nz]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+nz]+(-1./12.)*0.0)/ dx2;
}
else
{
//classic centered derivative
dux = ((1./12.)*k_u[idx-2*nz]+(-2./3.)*k_u[idx-nz]+0.0+(2./3.)*k_u[idx+nz]+(-1./12.)*k_u[idx+2*nz])/ dx;
dPhix = ((1./12.)*k_Phix[idx-2*nz]+(-2./3.)*k_Phix[idx-nz]+0.0+(2./3.)*k_Phix[idx+nz]+(-1./12.)*k_Phix[idx+2*nz])/ dx;
lapU += ((-1./12.)*k_u[idx-2*nz]+(4./3.)*k_u[idx-nz]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+nz]+(-1./12.)*k_u[idx+2*nz])/ dx2;
}
// Do the Z direction
// Left side
if (k==0)
{
//decentered derivative 2 ranks on the right
duz = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/dz;
dPhiz = ((1./12.)*0.0+(-2./3.)*0.0+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*k_Phiz[idx+2*s])/ dz;
lapU += ((-1./12.)*0.0+(4./3.)*0.0+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz2;
}
else if (k == 1)
{
//decentered derivative 1 rank on the right
duz = ((1./12.)*0.0 +(-2./3.)*k_u[idx-s]+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz;
dPhiz = ((1./12.)*0.0+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*k_Phiz[idx+2*s])/ dz;
lapU += ((-1./12.)*0.0+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz2;
}
else if (k == nz-1)
{
//decentered derivative 2 ranks on the left
duz = ((1./12.)*k_u[idx-2*s]+(-2./3.)*k_u[idx-s]+0.0+(2./3.)*0.0 +(-1./12.)*0.0)/ dz;
dPhiz = ((1./12.)*k_Phiz[idx-2*s]+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*0.0+(-1./12.)*0.0) / dz;
lapU += ((-1./12.)*k_u[idx-2*s]+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*0.0+(-1./12.)*0.0)/ dz2;
}
else if (k == nz-2)
{
//decentered derivative 1 ranks on the left
duz = ((1./12.)*k_u[idx-2*s]+(-2./3.)*k_u[idx-s]+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*0.0)/ dz;
dPhiz = ((1./12.)*k_Phiz[idx-2*s]+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*0.0)/ dz;
lapU += ((-1./12.)*k_u[idx-2*s]+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*0.0)/ dz2;
}
else
{
//classic centered derivative
duz = ((1./12.)*k_u[idx-2*s]+(-2./3.)*k_u[idx-s]+0.0+(2./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz;
dPhiz = ((1./12.)*k_Phiz[idx-2*s]+(-2./3.)*k_Phiz[idx-s]+0.0+(2./3.)*k_Phiz[idx+s]+(-1./12.)*k_Phiz[idx+2*s])/ dz;
lapU += ((-1./12.)*k_u[idx-2*s]+(4./3.)*k_u[idx-s]+(-5./2.)*k_u[idx]+(4./3.)*k_u[idx+s]+(-1./12.)*k_u[idx+2*s])/ dz2;
}
sigmax = 0.0;
sigmaz = 0.0;
// Check if in left PML-X
if((n_xlpml>0) && (i < n_xlpml))
{
sigmax = xlpml[i];
}
// Check if in right PML-X
else if((n_xrpml>0) && (i >= nx-n_xrpml))
{
sigmax = xrpml[n_xrpml-((nx-1)-i)];
}
// Check if in left PML-Z
if((n_zlpml>0) && (k < n_zlpml))
{
sigmaz = zlpml[k];
}
// Check if in right PML-Z
else if((n_zrpml>0) && (k >= nz-n_zrpml))
{
sigmaz = zrpml[n_zrpml-((nz-1)-k)]; // 0th element of the right pml array corresponds to n_zrpml'th node from the right boundary.
}
if((sigmaz != 0.0) || (sigmax != 0.0))
{
kp1_Phix[idx] = k_Phix[idx] - dt*sigmax*k_Phix[idx] + dt*(sigmaz-sigmax)*dux;
kp1_Phiz[idx] = k_Phiz[idx] - dt*sigmaz*k_Phiz[idx] + dt*(sigmax-sigmaz)*duz;
fac1 = (2.0*dt2 / (2.0 + dt*(sigmax+sigmaz)));
fac2 = (C[idx]*C[idx])*(rhs[idx]+lapU+dPhix+dPhiz) - (km1_u[idx]-2.0*k_u[idx])/dt2 + (sigmax+sigmaz)*km1_u[idx]/(2.0*dt) - (sigmax*sigmaz)*k_u[idx];
kp1_u[idx] = fac1 * fac2;
}
else
{
kp1_Phix[idx] = k_Phix[idx];
kp1_Phiz[idx] = k_Phiz[idx];
kp1_u[idx] = dt2*(C[idx]*C[idx])*(rhs[idx]+lapU+dPhix+dPhiz) - (km1_u[idx]-2.0*k_u[idx]);
}
}
}
}
#endif
|
5442.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"
/* Array initialization. */
static
void init_array (int ni, int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
// printf("Initializing Array\n");
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++)
{
A[i][j] = ((DATA_TYPE) (i + j) / nj);
}
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nj; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_conv2d(int ni,
int nj,
DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
int i, j;
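  /* 3x3 convolution with the fixed stencil
       [ 0.2  0.5 -0.8 ]
       [-0.3  0.6 -0.9 ]
       [ 0.4  0.7  0.1 ]
     applied over the interior of A; the one-cell border of B is left
     untouched. */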
#pragma scop
#pragma omp parallel for private(j) collapse(2) schedule(dynamic, 16) num_threads(4)
for (i = 1; i < _PB_NI - 1; ++i)
{
for (j = 1; j < _PB_NJ - 1; ++j)
{
B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
+ -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
+ 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
}
}
#pragma endscop
// printf("Kernal computation complete !!\n");
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);
/* Initialize array(s). */
init_array (ni, nj, POLYBENCH_ARRAY(A));
/* Start timer. */
//polybench_start_instruments;
polybench_timer_start();
/* Run kernel. */
kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));
/* Stop and print timer. */
polybench_timer_stop();
polybench_timer_print();
//polybench_stop_instruments;
//polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
return 0;
}
|
prior_box_op.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <vector>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/transform.h"
namespace paddle {
namespace operators {
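// Dedupes the input ratios (within 1e-6), always emits 1.0 first, and
// optionally appends reciprocals: e.g. {2.0, 3.0} with flip=true expands
// to {1.0, 2.0, 0.5, 3.0, 1.0/3.0}.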
inline void ExpandAspectRatios(const std::vector<float>& input_aspect_ratior,
bool flip,
std::vector<float>* output_aspect_ratior) {
constexpr float epsilon = 1e-6;
output_aspect_ratior->clear();
output_aspect_ratior->push_back(1.0f);
for (size_t i = 0; i < input_aspect_ratior.size(); ++i) {
float ar = input_aspect_ratior[i];
bool already_exist = false;
for (size_t j = 0; j < output_aspect_ratior->size(); ++j) {
if (fabs(ar - output_aspect_ratior->at(j)) < epsilon) {
already_exist = true;
break;
}
}
if (!already_exist) {
output_aspect_ratior->push_back(ar);
if (flip) {
output_aspect_ratior->push_back(1.0f / ar);
}
}
}
}
template <typename T>
class PriorBoxOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<paddle::framework::Tensor>("Input");
auto* image = ctx.Input<paddle::framework::Tensor>("Image");
auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes");
auto* vars = ctx.Output<paddle::framework::Tensor>("Variances");
auto min_sizes = ctx.Attr<std::vector<float>>("min_sizes");
auto max_sizes = ctx.Attr<std::vector<float>>("max_sizes");
auto input_aspect_ratio = ctx.Attr<std::vector<float>>("aspect_ratios");
auto variances = ctx.Attr<std::vector<float>>("variances");
auto flip = ctx.Attr<bool>("flip");
auto clip = ctx.Attr<bool>("clip");
auto min_max_aspect_ratios_order =
ctx.Attr<bool>("min_max_aspect_ratios_order");
std::vector<float> aspect_ratios;
ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios);
T step_w = static_cast<T>(ctx.Attr<float>("step_w"));
T step_h = static_cast<T>(ctx.Attr<float>("step_h"));
T offset = static_cast<T>(ctx.Attr<float>("offset"));
auto img_width = image->dims()[3];
auto img_height = image->dims()[2];
auto feature_width = input->dims()[3];
auto feature_height = input->dims()[2];
T step_width, step_height;
if (step_w == 0 || step_h == 0) {
step_width = static_cast<T>(img_width) / feature_width;
step_height = static_cast<T>(img_height) / feature_height;
} else {
step_width = step_w;
step_height = step_h;
}
int num_priors = aspect_ratios.size() * min_sizes.size();
if (max_sizes.size() > 0) {
num_priors += max_sizes.size();
}
boxes->mutable_data<T>(ctx.GetPlace());
vars->mutable_data<T>(ctx.GetPlace());
T* b_t = boxes->data<T>();
for (int h = 0; h < feature_height; ++h) {
for (int w = 0; w < feature_width; ++w) {
T center_x = (w + offset) * step_width;
T center_y = (h + offset) * step_height;
T box_width, box_height;
for (size_t s = 0; s < min_sizes.size(); ++s) {
auto min_size = min_sizes[s];
if (min_max_aspect_ratios_order) {
box_width = box_height = min_size / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
if (max_sizes.size() > 0) {
auto max_size = max_sizes[s];
// square prior with size sqrt(minSize * maxSize)
box_width = box_height = sqrt(min_size * max_size) / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
}
// priors with different aspect ratios
for (size_t r = 0; r < aspect_ratios.size(); ++r) {
float ar = aspect_ratios[r];
if (fabs(ar - 1.) < 1e-6) {
continue;
}
box_width = min_size * sqrt(ar) / 2.;
box_height = min_size / sqrt(ar) / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
}
} else {
// priors with different aspect ratios
for (size_t r = 0; r < aspect_ratios.size(); ++r) {
float ar = aspect_ratios[r];
box_width = min_size * sqrt(ar) / 2.;
box_height = min_size / sqrt(ar) / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
}
if (max_sizes.size() > 0) {
auto max_size = max_sizes[s];
// square prior with size sqrt(minSize * maxSize)
box_width = box_height = sqrt(min_size * max_size) / 2.;
b_t[0] = (center_x - box_width) / img_width;
b_t[1] = (center_y - box_height) / img_height;
b_t[2] = (center_x + box_width) / img_width;
b_t[3] = (center_y + box_height) / img_height;
b_t += 4;
}
}
}
}
}
if (clip) {
T* dt = boxes->data<T>();
std::transform(dt, dt + boxes->numel(), dt, [](T v) -> T {
return std::min<T>(std::max<T>(v, 0.), 1.);
});
}
framework::Tensor var_t;
var_t.mutable_data<T>(
framework::make_ddim({1, static_cast<int>(variances.size())}),
ctx.GetPlace());
auto var_et = framework::EigenTensor<T, 2>::From(var_t);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
for (size_t i = 0; i < variances.size(); ++i) {
var_et(0, i) = variances[i];
}
int box_num = feature_height * feature_width * num_priors;
auto var_dim = vars->dims();
vars->Resize({box_num, static_cast<int>(variances.size())});
auto e_vars = framework::EigenMatrix<T, Eigen::RowMajor>::From(*vars);
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for collapse(2)
#endif
for (int i = 0; i < box_num; ++i) {
for (int j = 0; j < variances.size(); ++j) {
e_vars(i, j) = variances[j];
}
}
vars->Resize(var_dim);
}
}; // class PriorBoxOpKernel
} // namespace operators
} // namespace paddle
|
update_ops_named_state.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
void normalize(double squared_norm, CTYPE* state, ITYPE dim){
const ITYPE loop_dim = dim;
const double normalize_factor = sqrt(1./squared_norm);
ITYPE state_index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(state_index=0 ; state_index<loop_dim ; ++state_index){
state[state_index] *= normalize_factor;
}
}
void state_add(const CTYPE *state_added, CTYPE *state, ITYPE dim) {
ITYPE index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < dim; ++index) {
state[index] += state_added[index];
}
}
void state_multiply(CTYPE coef, CTYPE *state, ITYPE dim) {
ITYPE index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < dim; ++index) {
state[index] *= coef;
}
}
|
apm_omp.c | /**
* * APPROXIMATE PATTERN MATCHING
* *
* * INF560 X2016
* */
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#include <mpi.h>
#include <omp.h>
#define APM_DEBUG 0
char *read_input_file(char * filename, int * size) {
char * buf ;
off_t fsize;
int fd = 0 ;
int n_bytes = 1 ;
/* Open the text file */
fd = open(filename, O_RDONLY);
if (fd == -1) {
fprintf(stderr, "Unable to open the text file <%s>\n", filename);
return NULL;
}
/* Get the number of characters in the textfile */
fsize = lseek(fd, 0, SEEK_END);
lseek(fd, 0, SEEK_SET);
/* TODO check return of lseek */
#if APM_DEBUG
printf( "File length: %lld\n", fsize ) ;
#endif
/* Allocate data to copy the target text */
buf = (char *)malloc( fsize * sizeof ( char ) ) ;
if (buf == NULL) {
fprintf(stderr,
"Unable to allocate %lld byte(s) for main array\n",
(long long)fsize);
return NULL;
}
n_bytes = read(fd, buf, fsize);
if (n_bytes != fsize) {
fprintf(stderr,
"Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n",
(long long)fsize, n_bytes) ;
return NULL ;
}
#if APM_DEBUG
printf( "Number of read bytes: %d\n", n_bytes ) ;
#endif
*size = n_bytes ;
close(fd);
return buf;
}
#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))
int levenshtein(char *s1, char *s2, int len, int * column) {
unsigned int x, y, lastdiag, olddiag;
for (y = 1; y <= len; y++) {
column[y] = y;
}
for (x = 1; x <= len; x++) {
column[0] = x;
lastdiag = x-1 ;
for (y = 1; y <= len; y++) {
olddiag = column[y];
column[y] = MIN3(
column[y] + 1,
column[y-1] + 1,
lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1)
);
lastdiag = olddiag;
}
}
return(column[len]);
}
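/* Illustrative note: this is the classic single-column dynamic programming
 * formulation of the Levenshtein distance. For example,
 * levenshtein("ACGT", text+j, 4, column) compares the 4-byte pattern with
 * the text window starting at j using only a column of len+1 integers
 * instead of a full (len+1) x (len+1) matrix; a result of 0 means an exact
 * match, and matches with distance <= approx_factor are counted below. */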
int main(int argc, char ** argv) {
MPI_Init(&argc, &argv);
char ** pattern ;
int * scounts;
int * displs;
int step;
char * filename ;
int approx_factor = 0 ;
int nb_patterns = 0 ;
int i, j ;
char * buf = NULL ;
struct timeval t1, t2;
double duration ;
int * n_matches ;
int n_bytes ;
int rank;
int size;
int max_pat;
int chunk_size;
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
/* Check number of arguments */
if (argc < 4) {
printf("Usage: %s approximation_factor "
"dna_database pattern1 pattern2 ...\n",
argv[0]);
return 1;
}
n_bytes=0;
/* Get the distance factor */
approx_factor = atoi(argv[1]);
/* Grab the filename containing the target text */
filename = argv[2];
/* Get the number of patterns that the user wants to search for */
nb_patterns = argc - 3;
/* Fill the pattern array */
pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ;
if (pattern == NULL) {
fprintf(
stderr,
"Unable to allocate array of pattern of size %d\n",
nb_patterns
);
return 1 ;
}
/* Grab the patterns */
for (i = 0 ; i < nb_patterns ; i++) {
int l ;
l = strlen(argv[i+3]) ;
if (l <= 0) {
fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ;
return 1 ;
}
pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ;
if (pattern[i] == NULL) {
fprintf( stderr, "Unable to allocate string of size %d\n", l ) ;
return 1 ;
}
strncpy( pattern[i], argv[i+3], (l+1) ) ;
}
if(rank==0) {
printf( "Approximate Pattern Mathing: "
"looking for %d pattern(s) in file %s w/ distance of %d\n",
nb_patterns, filename, approx_factor );
}
if(rank==0){
buf = read_input_file( filename, &n_bytes ) ;
if ( buf == NULL ) {
return 1 ;
}
}
/* Allocate the array of matches */
n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ;
if (n_matches == NULL) {
fprintf(
stderr,
"Error: unable to allocate memory for %ldB\n",
nb_patterns * sizeof( int )
);
return 1 ;
}
/*****
* BEGIN MAIN LOOP
*****/
/* Timer start */
if(rank==0){
gettimeofday(&t1, NULL);
}
max_pat=0;
for(i=0; i<nb_patterns; i++){
max_pat = max_pat > (int)strlen(pattern[i]) ? max_pat : (int)strlen(pattern[i]);
}
MPI_Bcast(&n_bytes, 1, MPI_INT, 0, MPI_COMM_WORLD);
step=n_bytes/(size);
displs=malloc( (size) * sizeof( int ) ) ;
scounts=malloc( (size) * sizeof( int ) ) ;
for(i=0; i<size-1; i++){
displs[i]=step*i;
scounts[i]=step+max_pat-1;
}
displs[size-1]=step*(size-1);
scounts[size-1]=step+n_bytes%(size);
char * rcv_buf;
rcv_buf= (char *) malloc((step+max_pat-1+n_bytes%size)*sizeof(char)) ;
MPI_Scatterv(buf, scounts, displs, MPI_CHAR, rcv_buf, step+max_pat-1+n_bytes%size, MPI_CHAR, 0, MPI_COMM_WORLD);
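/* Illustrative note: the first size-1 ranks each receive their step-byte
 * chunk plus max_pat-1 overlap bytes, so matches straddling a chunk
 * boundary are not lost. E.g. with n_bytes=100, size=4 and max_pat=5:
 * step=25, ranks 0-2 receive 29 bytes starting at offsets 0, 25 and 50,
 * and rank 3 receives the remaining 25-byte tail without overlap. */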
for ( i = 0 ; i < nb_patterns ; i++ ) {
int size_pattern = strlen(pattern[i]) ;
n_matches[i] = 0 ;
int my_sum=0;
if (rank!=size-1) {
chunk_size=step;
#pragma omp parallel
{
#pragma omp for schedule(dynamic) reduction(+:my_sum)
for ( j = 0 ; j < chunk_size ; j++ ) {
int * column ;
column = (int *)malloc( (size_pattern+1) * sizeof( int ) ) ;
int distance = 0;
#if APM_DEBUG
if ( j % 100 == 0 )
{
printf( "Procesing byte %d (out of %d)\n", j, n_bytes ) ;
}
#endif
distance = levenshtein( pattern[i], &rcv_buf[j], size_pattern, column );
if ( distance <= approx_factor ) {
my_sum++ ;
}
free( column );
}
}
}
else {
chunk_size=step+n_bytes%(size);
#pragma omp parallel
{
#pragma omp for schedule(dynamic) reduction(+:my_sum)
for ( j = 0 ; j < chunk_size ; j++ ) {
int * column ;
column = (int *)malloc( (size_pattern+1) * sizeof( int ) ) ;
int distance = 0 ;
int s ;
#if APM_DEBUG
if ( j % 100 == 0 )
{
printf( "Procesing byte %d (out of %d)\n", j, n_bytes ) ;
}
#endif
s = size_pattern ;
if ( chunk_size - j < size_pattern )
{
s = chunk_size - j ;
}
distance = levenshtein( pattern[i], &rcv_buf[j], s, column )+size_pattern-s ;
if ( distance <= approx_factor ) {
my_sum++ ;
}
free( column );
}
}
}
n_matches[i]=my_sum;
printf("%d matches from %d\n", n_matches[i], rank);
}
int rcv_matches[nb_patterns] ;
MPI_Reduce(n_matches, rcv_matches, nb_patterns, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
/* Timer stop: barrier first so the time measured on rank 0 covers all
ranks, and compute the duration only on rank 0, where t1 was set */
MPI_Barrier(MPI_COMM_WORLD);
if(rank==0){
gettimeofday(&t2, NULL);
duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);
printf( "APM done in %lf s\n", duration ) ;
}
/*****
* END MAIN LOOP
*****/
if(rank==0){
for ( i = 0 ; i < nb_patterns ; i++ )
{
printf( "Number of matches for pattern <%s>: %d\n",
pattern[i], rcv_matches[i] ) ;
}
}
MPI_Finalize();
return 0 ;
}
|
target-1.c | /* { dg-do run } */
#include <stdlib.h>
#define N 100000
void init (int *a1, int *a2)
{
int i, s = -1;
for (i = 0; i < N; i++)
{
a1[i] = s;
a2[i] = i;
s = -s;
}
}
void check (int *a, int *b)
{
int i;
for (i = 0; i < N; i++)
if (a[i] != b[i])
abort ();
}
void vec_mult_ref (int *p)
{
int i;
int v1[N], v2[N];
init (v1, v2);
for (i = 0; i < N; i++)
p[i] = v1[i] * v2[i];
}
void vec_mult (int *p)
{
int i;
int v1[N], v2[N];
init (v1, v2);
#pragma omp target map(p[0:N])
#pragma omp parallel for
for (i = 0; i < N; i++)
p[i] = v1[i] * v2[i];
}
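/* Note (informal): map(p[0:N]) maps the N-element section of p to the
 * device for the target region; v1 and v2 are referenced inside the
 * region, so they are implicitly mapped (tofrom) by the OpenMP default
 * rules. main() then checks the offloaded result against the host
 * reference computed by vec_mult_ref(). */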
int main ()
{
int p1[N], p2[N];
int v1[N], v2[N];
init (v1, v2);
vec_mult_ref (p1);
vec_mult (p2);
check (p1, p2);
return 0;
}
|
line_search_contact_strategy.h | // KRATOS ___| | | |
// \___ \ __| __| | | __| __| | | __| _` | |
// | | | | | ( | | | | ( | |
// _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS
//
// License: BSD License
// license: StructuralMechanicsApplication/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
#if !defined(KRATOS_LINE_SEARCH_CONTACT_STRATEGY)
#define KRATOS_LINE_SEARCH_CONTACT_STRATEGY
/* System Includes */
/* External Includes */
/* Project includes */
#include "includes/kratos_parameters.h"
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/variables.h"
#include "solving_strategies/strategies/solving_strategy.h"
#include "solving_strategies/strategies/line_search_strategy.h"
#include "utilities/openmp_utils.h"
#include "utilities/variable_utils.h"
#include "utilities/atomic_utilities.h"
// Convergence criteria
#include "solving_strategies/convergencecriterias/convergence_criteria.h"
// Default builder and solver
#include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h"
// TODO: Extend the descriptions
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/** \brief Line search strategy customized for contact problems.
This class extends the standard line search strategy by splitting the
solution increment into displacement and Lagrange multiplier (LM) parts
and computing a separate line search factor for each of them.
*/
template<class TSparseSpace,
class TDenseSpace, // = DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class LineSearchContactStrategy :
public LineSearchStrategy< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
typedef ConvergenceCriteria<TSparseSpace, TDenseSpace> TConvergenceCriteriaType;
/** Counted pointer of ClassName */
KRATOS_CLASS_POINTER_DEFINITION( LineSearchContactStrategy );
typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> StrategyBaseType;
typedef ResidualBasedNewtonRaphsonStrategy<TSparseSpace, TDenseSpace, TLinearSolver> NRBaseType;
typedef LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TBuilderAndSolverType TBuilderAndSolverType;
typedef typename BaseType::TDataType TDataType;
typedef TSparseSpace SparseSpaceType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef ModelPart::NodesContainerType NodesArrayType;
typedef ModelPart::ConditionsContainerType ConditionsArrayType;
typedef std::size_t IndexType;
/**
* Default constructor
* @param rModelPart: The model part of the problem
* @param pScheme: The integration scheme
* @param pNewLinearSolver: The linear solver employed
* @param pNewConvergenceCriteria: The convergence criteria employed
* @param MaxIterations: The maximum number of non-linear iterations
* @param CalculateReactions: The flag for the reaction calculation
* @param ReformDofSetAtEachStep: The flag that allows to re-form the DOF set at each step
* @param MoveMeshFlag: The flag that allows to move the mesh
*/
LineSearchContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})")
)
: LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag)
{
KRATOS_TRY;
Parameters DefaultParameters = Parameters(R"(
{
})" );
ThisParameters.ValidateAndAssignDefaults(DefaultParameters);
KRATOS_CATCH("");
}
/**
* Constructor with a custom builder and solver
* @param rModelPart: The model part of the problem
* @param pScheme: The integration scheme
* @param pNewLinearSolver: The linear solver employed
* @param pNewConvergenceCriteria: The convergence criteria employed
* @param pNewBuilderAndSolver: The builder and solver employed
* @param MaxIterations: The maximum number of non-linear iterations
* @param CalculateReactions: The flag for the reaction calculation
* @param ReformDofSetAtEachStep: The flag that allows to re-form the DOF set at each step
* @param MoveMeshFlag: The flag that allows to move the mesh
*/
LineSearchContactStrategy(
ModelPart& rModelPart,
typename TSchemeType::Pointer pScheme,
typename TLinearSolver::Pointer pNewLinearSolver,
typename TConvergenceCriteriaType::Pointer pNewConvergenceCriteria,
typename TBuilderAndSolverType::Pointer pNewBuilderAndSolver,
IndexType MaxIterations = 30,
bool CalculateReactions = false,
bool ReformDofSetAtEachStep = false,
bool MoveMeshFlag = false,
Parameters ThisParameters = Parameters(R"({})")
)
: LineSearchStrategy<TSparseSpace, TDenseSpace, TLinearSolver>(rModelPart, pScheme, pNewLinearSolver, pNewConvergenceCriteria, pNewBuilderAndSolver, MaxIterations, CalculateReactions, ReformDofSetAtEachStep, MoveMeshFlag )
{
KRATOS_TRY;
Parameters DefaultParameters = Parameters(R"(
{
})" );
ThisParameters.ValidateAndAssignDefaults(DefaultParameters);
KRATOS_CATCH("");
}
/**
* Destructor.
*/
~LineSearchContactStrategy() override
= default;
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
///@}
///@name Friends
///@{
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
bool mRecalculateFactor; // Whether the scale factor is recalculated
///@}
///@name Protected Operators
///@{
/**
* Performs all the required operations that should be done (for each step)
* before solving the solution step.
* A member variable should be used as a flag to make sure this function is called only once per step.
*/
void InitializeSolutionStep() override
{
BaseType::InitializeSolutionStep();
// TODO: Add something if necessary
}
/**
* Here the database is updated
*/
void UpdateDatabase(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b,
const bool MoveMesh
) override
{
typename TSchemeType::Pointer pScheme = this->GetScheme();
typename TBuilderAndSolverType::Pointer pBuilderAndSolver = this->GetBuilderAndSolver(); // FIXME: Separate into the LM and displacement parts
TSystemVectorType aux(b.size()); //TODO: do it by using the space
TSparseSpace::Assign(aux, 0.5, Dx);
TSystemVectorType DxDisp(b.size());
TSystemVectorType DxLM(b.size());
ComputeSplitDx(Dx, DxDisp, DxLM);
// Compute residual without update
TSparseSpace::SetToZero(b);
pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b );
double roDisp;
double roLM;
ComputeMixedResidual(b, roDisp, roLM);
// Compute half step residual
NRBaseType::UpdateDatabase(A,aux,b,MoveMesh);
TSparseSpace::SetToZero(b);
pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b );
double rhDisp;
double rhLM;
ComputeMixedResidual(b, rhDisp, rhLM);
// Compute full step residual (add another half Dx to the previous half)
NRBaseType::UpdateDatabase(A,aux,b,MoveMesh);
TSparseSpace::SetToZero(b);
pBuilderAndSolver->BuildRHS(pScheme, BaseType::GetModelPart(), b );
double rfDisp;
double rfLM;
ComputeMixedResidual(b, rfDisp, rfLM);
// We compute the parabola
double XminDisp = 1e-3;
double XmaxDisp = 1.0;
double XminLM = 1e-3;
double XmaxLM = 1.0;
ComputeParabola(XminDisp, XmaxDisp, rfDisp, roDisp, rhDisp);
ComputeParabola(XminLM, XmaxLM, rfLM, roLM, rhLM);
// Perform final update
TSparseSpace::Assign(aux,-(1.0 - XmaxDisp), DxDisp);
TSparseSpace::UnaliasedAdd(aux,-(1.0 - XmaxLM), DxLM);
NRBaseType::UpdateDatabase(A,aux,b,MoveMesh);
}
/**
* This method splits the vector of increments of DoF into displacement and LM components
* @param Dx The increment of displacements and LM
* @param DxDisp The increment of displacements
* @param DxLM The increment of LM
*/
void ComputeSplitDx(
TSystemVectorType& Dx,
TSystemVectorType& DxDisp,
TSystemVectorType& DxLM
)
{
// Now we iterate over all the nodes
NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
const int num_nodes = static_cast<int>(nodes_array.size());
#pragma omp parallel for
for(int i = 0; i < num_nodes; ++i)
{
auto it_node = nodes_array.begin() + i;
for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++)
{
const int j = (**itDoF).EquationId();
const std::size_t CurrVar = (**itDoF).GetVariable().Key();
if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z))
{
DxDisp[j] = Dx[j];
DxLM[j] = 0.0;
}
else // Corresponding with contact
{
DxDisp[j] = 0.0;
DxLM[j] = Dx[j];
}
}
}
}
/**
* This method calculates the norm considering one norm for the displacement and another norm for the LM
* @param b The residual vector
* @param normDisp The norm of the displacement residual
* @param normLM The norm of the LM residual
*/
void ComputeMixedResidual(
TSystemVectorType& b,
double& normDisp,
double& normLM
)
{
// The accumulated norms must start from zero before the parallel loop
normDisp = 0.0;
normLM = 0.0;
// Now we iterate over all the nodes
NodesArrayType& nodes_array = StrategyBaseType::GetModelPart().Nodes();
const int num_nodes = static_cast<int>(nodes_array.size());
#pragma omp parallel for
for(int i = 0; i < num_nodes; ++i) {
auto it_node = nodes_array.begin() + i;
for(auto itDoF = it_node->GetDofs().begin() ; itDoF != it_node->GetDofs().end() ; itDoF++) {
const int j = (**itDoF).EquationId();
const std::size_t CurrVar = (**itDoF).GetVariable().Key();
if ((CurrVar == DISPLACEMENT_X) || (CurrVar == DISPLACEMENT_Y) || (CurrVar == DISPLACEMENT_Z)) {
AtomicAdd(normDisp, b[j] * b[j]);
} else { // Corresponding with contact
AtomicAdd(normLM, b[j] * b[j]);
}
}
}
normDisp = std::sqrt(normDisp);
normLM = std::sqrt(normLM);
}
/**
* This method computes the parabola necessary for the line search
* @param Xmax The abscissa of the optimum (output, clamped to the admissible range)
* @param Xmin The minimal abscissa
* @param rf The residual norm of the full step
* @param ro The residual norm without step
* @param rh The residual norm of the half step
*/
void ComputeParabola(
double& Xmax,
double& Xmin,
const double rf,
const double ro,
const double rh
)
{
// Compute optimal (limited to the range 0-1)
// Parabola is r(x) = a*x^2 + b*x + c fitted through:
// x=0   --> r=ro
// x=1/2 --> r=rh
// x=1   --> r=rf
// c = ro, b = 4*rh - rf - 3*ro, a = 2*rf - 4*rh + 2*ro
// minimum found if a>0 at the position x = -b/(2*a)
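// Derivation sketch: imposing r(0)=ro, r(1/2)=rh and r(1)=rf on
// r(x) = a*x^2 + b*x + c gives c = ro, a/4 + b/2 + ro = rh and
// a + b + ro = rf; solving yields a = 2*rf - 4*rh + 2*ro and
// b = 4*rh - rf - 3*ro, the coefficients used below.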
const double parabola_a = 2 * rf + 2 * ro - 4 * rh;
const double parabola_b = 4 * rh - rf - 3 * ro;
if( parabola_a > 0.0) // If the parabola has a local minimum
{
Xmax = -0.5 * parabola_b/parabola_a; // -b / (2*a)
if( Xmax > 1.0)
Xmax = 1.0;
else if(Xmax < -1.0)
Xmax = -1.0;
}
else // The parabola degenerates to a line or has a local maximum: take the best solution at one of the extremes
{
if(rf < ro)
Xmax = 1.0;
else
Xmax = Xmin; // Should be zero, but otherwise it will stagnate
}
}
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
/**
* Copy constructor.
*/
LineSearchContactStrategy(const LineSearchContactStrategy& Other)
{
}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@}
///@name Serialization
///@{
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class LineSearchContactStrategy */
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
///@}
} // namespace Kratos
#endif /* KRATOS_LINE_SEARCH_CONTACT_STRATEGY */
|
atomic.c | /* PMSIS includes */
#include "pmsis.h"
#include "omp.h"
#define ARRAY_SIZE 512
uint32_t a[ARRAY_SIZE] = {0};
uint32_t b[ARRAY_SIZE] = {0};
uint32_t c[ARRAY_SIZE] = {0};
uint32_t errors = 0;
/* Cluster main entry, executed by core 0. */
void cluster_delegate(void *arg)
{
printf("Cluster master core entry\n");
int32_t counter = 0;
int32_t nthreads = 0;
#pragma omp parallel
{
printf("[%d %d] Fork entry\n", pi_cluster_id(), omp_get_thread_num() );
/* Capture the team size inside the parallel region:
omp_get_num_threads() returns 1 outside of it. */
#pragma omp single
nthreads = omp_get_num_threads();
#pragma omp barrier
#pragma omp atomic
counter ++;
}
printf("Core counter: %d\n", counter);
if (counter != nthreads)
{
errors = 1;
}
printf("Cluster master core exit\n");
}
void helloworld(void)
{
printf("Entering main controller\n");
uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id();
printf("[%d %d] Hello World!\n", cluster_id, core_id);
struct pi_device cluster_dev;
struct pi_cluster_conf cl_conf;
/* Init cluster configuration structure. */
pi_cluster_conf_init(&cl_conf);
cl_conf.id = 0; /* Set cluster ID. */
/* Configure & open cluster. */
pi_open_from_conf(&cluster_dev, &cl_conf);
if (pi_cluster_open(&cluster_dev))
{
printf("Cluster open failed !\n");
pmsis_exit(-1);
}
/* Prepare cluster task and send it to cluster. */
struct pi_cluster_task cl_task;
pi_cluster_send_task_to_cl(&cluster_dev, pi_cluster_task(&cl_task, cluster_delegate, NULL));
pi_cluster_close(&cluster_dev);
if (errors)
{
printf("Test failed!\n");
}
else
{
printf("Test success!\n");
}
pmsis_exit(errors);
}
/* Program Entry. */
int main(void)
{
printf("\n\n\t *** PMSIS HelloWorld ***\n\n");
return pmsis_kickoff((void *) helloworld);
}
|
blake2sp.c | /*
BLAKE2 reference source code package - optimized C implementations
Written in 2012 by Samuel Neves <sneves@dei.uc.pt>
To the extent possible under law, the author(s) have dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
You should have received a copy of the CC0 Public Domain Dedication along with
this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "blake2.h"
#include "blake2-impl.h"
#define PARALLELISM_DEGREE 8
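/* Layout note: blake2sp builds a two-level tree. The input stream is
 * striped across PARALLELISM_DEGREE leaves one BLAKE2S_BLOCKBYTES block
 * at a time, so leaf i consumes blocks i, i+8, i+16, ...; the root node
 * then hashes the concatenation of the eight leaf digests. */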
static inline int blake2sp_init_leaf( blake2s_state *S, uint8_t outlen, uint8_t keylen, uint64_t offset )
{
blake2s_param P[1];
P->digest_length = outlen;
P->key_length = keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
P->leaf_length = 0;
store48( P->node_offset, offset );
P->node_depth = 0;
P->inner_length = outlen;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2s_init_param( S, P );
}
static inline int blake2sp_init_root( blake2s_state *S, uint8_t outlen, uint8_t keylen )
{
blake2s_param P[1];
P->digest_length = outlen;
P->key_length = keylen;
P->fanout = PARALLELISM_DEGREE;
P->depth = 2;
P->leaf_length = 0;
store48( P->node_offset, 0ULL );
P->node_depth = 1;
P->inner_length = outlen;
memset( P->salt, 0, sizeof( P->salt ) );
memset( P->personal, 0, sizeof( P->personal ) );
return blake2s_init_param( S, P );
}
int blake2sp_init( blake2sp_state *S, const uint8_t outlen )
{
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
if( blake2sp_init_root( S->R, outlen, 0 ) < 0 )
return -1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
return 0;
}
int blake2sp_init_key( blake2sp_state *S, const uint8_t outlen, const void *key, const uint8_t keylen )
{
if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1;
memset( S->buf, 0, sizeof( S->buf ) );
S->buflen = 0;
if( blake2sp_init_root( S->R, outlen, keylen ) < 0 )
return -1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1;
S->R->last_node = 1;
S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
return 0;
}
int blake2sp_update( blake2sp_state *S, const uint8_t *in, uint64_t inlen )
{
size_t left = S->buflen;
size_t fill = sizeof( S->buf ) - left;
if( left && inlen >= fill )
{
memcpy( S->buf + left, in, fill );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
in += fill;
inlen -= fill;
left = 0;
}
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
{
#if defined(_OPENMP)
size_t id__ = omp_get_thread_num();
#endif
uint64_t inlen__ = inlen;
const uint8_t *in__ = ( const uint8_t * )in;
in__ += id__ * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S->S[id__], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
}
in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
if( inlen > 0 )
memcpy( S->buf + left, in, inlen );
S->buflen = left + inlen;
return 0;
}
int blake2sp_final( blake2sp_state *S, uint8_t *out, const uint8_t outlen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
{
if( S->buflen > i * BLAKE2S_BLOCKBYTES )
{
size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES;
if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES;
blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left );
}
blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
}
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );
blake2s_final( S->R, out, outlen );
return 0;
}
int blake2sp( uint8_t *out, const void *in, const void *key, uint8_t outlen, uint64_t inlen, uint8_t keylen )
{
uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
blake2s_state S[PARALLELISM_DEGREE][1];
blake2s_state FS[1];
/* Verify parameters */
if ( NULL == in ) return -1;
if ( NULL == out ) return -1;
if ( NULL == key ) keylen = 0;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
if( blake2sp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1;
S[PARALLELISM_DEGREE - 1]->last_node = 1; // mark last node
if( keylen > 0 )
{
uint8_t block[BLAKE2S_BLOCKBYTES];
memset( block, 0, BLAKE2S_BLOCKBYTES );
memcpy( block, key, keylen );
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );
secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
}
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
for( size_t id__ = 0; id__ < PARALLELISM_DEGREE; ++id__ )
#endif
{
#if defined(_OPENMP)
size_t id__ = omp_get_thread_num();
#endif
uint64_t inlen__ = inlen;
const uint8_t *in__ = ( const uint8_t * )in;
in__ += id__ * BLAKE2S_BLOCKBYTES;
while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
{
blake2s_update( S[id__], in__, BLAKE2S_BLOCKBYTES );
in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
}
if( inlen__ > id__ * BLAKE2S_BLOCKBYTES )
{
const size_t left = inlen__ - id__ * BLAKE2S_BLOCKBYTES;
const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
blake2s_update( S[id__], in__, len );
}
blake2s_final( S[id__], hash[id__], BLAKE2S_OUTBYTES );
}
if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
return -1;
FS->last_node = 1;
for( size_t i = 0; i < PARALLELISM_DEGREE; ++i )
blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );
blake2s_final( FS, out, outlen );
return 0;
}
#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
int main( int argc, char **argv )
{
uint8_t key[BLAKE2S_KEYBYTES];
uint8_t buf[KAT_LENGTH];
for( size_t i = 0; i < BLAKE2S_KEYBYTES; ++i )
key[i] = ( uint8_t )i;
for( size_t i = 0; i < KAT_LENGTH; ++i )
buf[i] = ( uint8_t )i;
for( size_t i = 0; i < KAT_LENGTH; ++i )
{
uint8_t hash[BLAKE2S_OUTBYTES];
blake2sp( hash, buf, key, BLAKE2S_OUTBYTES, i, BLAKE2S_KEYBYTES );
if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
{
puts( "error" );
return -1;
}
}
puts( "ok" );
return 0;
}
#endif
|
hoNDArray_utils.h | #pragma once
#include <boost/make_shared.hpp>
#include <boost/range/combine.hpp>
#include <numeric>
#include "hoNDArray.h"
#include "hoNDArray_iterators.h"
#include "vector_td_utilities.h"
#include <boost/version.hpp>
#if (BOOST_VERSION < 107200)
#include <boost/math/interpolators/cubic_b_spline.hpp>
namespace boost::math::interpolators {
auto cardinal_cubic_b_spline = [](auto ... args){return boost::math::cubic_b_spline(args...);};
}
#else
#include <boost/math/interpolators/cardinal_cubic_b_spline.hpp>
#endif
#include <boost/math/special_functions/trunc.hpp>
#include <boost/range/adaptor/strided.hpp>
#include <range/v3/numeric.hpp>
#include <range/v3/view.hpp>
#include <range/v3/action.hpp>
#ifdef USE_OMP
#include <omp.h>
#endif
#ifdef max
#undef max
#endif
#ifdef min
#undef min
#endif
namespace Gadgetron {
class ArrayIterator
{
public:
ArrayIterator(std::vector<size_t> *dimensions, std::vector<size_t> *order)
{
block_sizes_.push_back(1);
for (size_t i = 0; i < order->size(); i++) {
dimensions_.push_back((*dimensions)[i]);
order_.push_back((*order)[i]);
current_.push_back(0);
if (i > 0) {
block_sizes_.push_back(block_sizes_[i-1]*dimensions_[i-1]);
}
}
current_idx_ = 0;
}
inline size_t advance()
{
size_t order_index = 0;
current_[order_[order_index]]++;
while (current_[order_[order_index]] >= dimensions_[order_[order_index]]) {
current_[order_[order_index]] = 0;
order_index = (order_index+1)%dimensions_.size();
current_[order_[order_index]]++;
}
current_idx_ = 0;
for (size_t i = 0; i < dimensions_.size(); i++) {
current_idx_ += current_[i]*block_sizes_[i];
}
return current_idx_;
}
inline size_t get_current_idx() const {
return current_idx_;
}
std::vector<size_t> get_current_sub() {
return current_;
}
protected:
std::vector<size_t> dimensions_;
std::vector<size_t> order_;
std::vector<size_t> current_;
std::vector<size_t> block_sizes_;
size_t current_idx_;
};
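// Usage sketch (hypothetical values): for dimensions {4,3} and order {1,0},
// an ArrayIterator enumerates the linear offsets of the source array in the
// order needed to write the permuted output contiguously; get_current_idx()
// returns the current source offset and advance() steps to the next one, as
// used by the element-wise branch of permute() below.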
template<class T> hoNDArray<T> shift_dim( const hoNDArray<T>& in, int shift )
{
std::vector<size_t> order;
for (size_t i = 0; i < in.get_number_of_dimensions(); i++) {
order.push_back(static_cast<size_t>((i+shift)%in.get_number_of_dimensions()));
}
return permute(in,order);
}
template<class T> void shift_dim(const hoNDArray<T>& in, hoNDArray<T>& out, int shift )
{
std::vector<size_t> order;
for (size_t i = 0; i < in.get_number_of_dimensions(); i++) {
order.push_back(static_cast<size_t>((i+shift)%in.get_number_of_dimensions()));
}
permute(in,out,order);
}
template<class T> hoNDArray<T>
permute( const hoNDArray<T>& in, const std::vector<size_t>& dim_order)
{
std::vector<size_t> dims;
for (size_t i = 0; i < dim_order.size(); i++)
dims.push_back(in.get_dimensions()->at(dim_order[i]));
hoNDArray<T> out(dims);
permute( in, out, dim_order);
return out;
}
template<class T> void
permute(const hoNDArray<T>& in, hoNDArray<T>& out, const std::vector<size_t>& dim_order)
{
// Check ordering array
if (dim_order.size() > in.get_number_of_dimensions()) {
throw std::runtime_error("hoNDArray::permute - Invalid length of dimension ordering array");;
}
std::vector<size_t> dim_count(in.get_number_of_dimensions(),0);
for (size_t i = 0; i < dim_order.size(); i++) {
if (dim_order[i] >= in.get_number_of_dimensions()) {
throw std::runtime_error("hoNDArray::permute - Invalid dimension order array");;
}
dim_count[dim_order[i]]++;
}
// Create an internal array to store the dimensions
std::vector<size_t> dim_order_int;
// Check that there are no duplicate dimensions
for (size_t i = 0; i < dim_order.size(); i++) {
if (dim_count[dim_order[i]] != 1) {
throw std::runtime_error("hoNDArray::permute - Invalid dimension order array (duplicates)");;
}
dim_order_int.push_back(dim_order[i]);
}
for (size_t i = 0; i < dim_order_int.size(); i++) {
if ((*in.get_dimensions())[dim_order_int[i]] != out.get_size(i)) {
throw std::runtime_error("permute(): dimensions of output array do not match the input array");;
}
}
// Pad dimension order array with dimension not mentioned in order array
if (dim_order_int.size() < in.get_number_of_dimensions()) {
for (size_t i = 0; i < dim_count.size(); i++) {
if (dim_count[i] == 0) {
dim_order_int.push_back(i);
}
}
}
T* o = out.get_data_ptr();
// if memcpy can be used during permute
size_t stride = 1;
size_t num_dim_memcpy = 0;
for (size_t i = 0; i < dim_order_int.size(); i++) {
if (dim_order_int[i]==i){
stride *= in.get_size(i);
num_dim_memcpy = i;
}
else{
break;
}
}
if (stride == 1) {
// point by point assignment is needed
ArrayIterator it(in.get_dimensions().get(), &dim_order_int);
for (size_t i = 0; i < in.get_number_of_elements(); i++) {
o[i] = in.get_data_ptr()[it.get_current_idx()];
it.advance();
}
}
else {
// memcpy can be used
size_t nDim = in.get_number_of_dimensions();
size_t num_memcpy = in.get_number_of_elements() / stride;
if (num_dim_memcpy == nDim - 1){
memcpy(out.begin(), in.begin(), in.get_number_of_bytes());
return;
}
// for the array index calculation
std::vector<size_t> dim_permute(nDim-num_dim_memcpy-1);
for (size_t i = num_dim_memcpy+1; i < dim_order_int.size(); i++) {
dim_permute[i - num_dim_memcpy - 1] = in.get_size(i);
}
size_t n;
const hoNDArray<T> permuteArray(dim_permute, const_cast<T*>(in.get_data_ptr()), false);
// starting index for in and out array for every permute memcpy operation
std::vector<size_t> ind_permute_in(dim_permute.size(), 0), ind_in(nDim, 0), ind_out(nDim, 0);
for (n = 0; n < num_memcpy; n++) {
permuteArray.calculate_index(n, ind_permute_in);
memcpy(&ind_in[0] + num_dim_memcpy + 1, &ind_permute_in[0], sizeof(size_t)*ind_permute_in.size());
// permute the indexes
for (size_t i = 0; i < nDim; i++) {
ind_out[i] = ind_in[dim_order_int[i]];
}
size_t offset_in = in.calculate_offset(ind_in);
size_t offset_out = out.calculate_offset(ind_out);
memcpy(o + offset_out, in.begin() + offset_in, sizeof(T)*stride);
}
}
}
// Expand array to new dimension
template<class T> hoNDArray<T>
expand(const hoNDArray<T>& in, size_t new_dim_size )
{
const size_t number_of_elements_in = in.get_number_of_elements();
std::vector<size_t> dims = in.dimensions();
dims.push_back(new_dim_size);
auto out = hoNDArray<T>(dims);
#ifdef USE_OMP
#pragma omp parallel for
#endif
for( long long int idx=0; idx<(long long int)(number_of_elements_in*new_dim_size); idx++ ){
out[idx] = in[idx%number_of_elements_in];
}
return out;
}
namespace {
template<class T, class ACCUMULATOR> hoNDArray<T>
accumulate(const hoNDArray<T>& in, size_t dim, ACCUMULATOR acc )
{
if( !(in.get_number_of_dimensions()>1) ){
throw std::runtime_error("sum(): underdimensioned.");;
}
if( dim > in.get_number_of_dimensions()-1 ){
throw std::runtime_error( "sum(): dimension out of range.");;
}
size_t number_of_batches = in.get_size(dim);
size_t number_of_elements = in.get_number_of_elements()/number_of_batches;
std::vector<size_t> dims;
for (size_t i = 0; i < in.get_number_of_dimensions(); i++){
if (i != dim) dims.push_back(in.get_size(i));
}
auto out = hoNDArray<T>(dims);
auto orig_dims = *in.get_dimensions();
auto stride = std::accumulate(orig_dims.begin(),orig_dims.begin()+dim,size_t(1),std::multiplies<size_t>());
size_t inner_elements = stride;
size_t outer_elements = out.get_number_of_elements()/inner_elements;
//#ifdef USE_OMP
//#pragma omp parallel for schedule(dynamic,1) collapse(2)
//#endif
for (size_t outer_idx = 0; outer_idx < outer_elements; outer_idx++) {
for (size_t idx = 0; idx < inner_elements; idx++) {
size_t offset = outer_idx*inner_elements;
size_t old_offset = offset*number_of_batches;
T val = in.at(idx+old_offset);
for (size_t j = 1; j < number_of_batches; j++) {
size_t in_idx = j * stride + idx+ old_offset;
val = acc(val,in.at(in_idx));
}
out.at(idx + offset) = val;
}
}
return out;
}
}
// Sum over dimension
template<class T> hoNDArray<T>
sum(const hoNDArray<T>& in, size_t dim )
{
return accumulate(in, dim, std::plus<T>());
}
template<class T> boost::shared_ptr<hoNDArray<T>>
sum(const hoNDArray<T>* in, size_t dim )
{
return boost::make_shared<hoNDArray<T>>(accumulate(*in, dim, std::plus<T>()));
}
template<class T> hoNDArray<T>
max(const hoNDArray<T>& in, size_t dim )
{
return accumulate(in, dim, [](auto v1, auto v2){ return std::max(v1,v2);});
}
template<class T> hoNDArray<T>
min(const hoNDArray<T>& in, size_t dim )
{
return accumulate(in, dim, [](auto v1, auto v2){ return std::min(v1,v2);});
}
/**
* @param[in] crop_offset starting position to crop
* @param[in] crop_size Size of cropped array
* @param[in] in input array
* @param[out] out Output array after cropping
*/
template<class T, unsigned int D> void
crop(const vector_td<size_t, D>& crop_offset, const vector_td<size_t, D>& crop_size, const hoNDArray<T>& in, hoNDArray<T>& out)
{
if (in.get_number_of_dimensions() < D){
std::stringstream ss;
ss << "crop: number of image dimensions should be at least " << D;
throw std::runtime_error(ss.str());
}
std::vector<size_t> dims = to_std_vector(crop_size);
for (unsigned int d = D; d<in.get_number_of_dimensions(); d++){
dims.push_back(in.get_size(d));
}
if (!out.dimensions_equal(&dims)){
out.create(dims);
}
typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t, D>(*in.get_dimensions());
typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t, D>(*out.get_dimensions());
if (weak_greater(crop_offset + matrix_size_out, matrix_size_in)){
throw std::runtime_error("crop: cropping size mismatch");;
}
size_t len = out.get_size(0);
size_t num = out.get_number_of_elements() / len;
long long k;
const T *in_ptr = in.get_data_ptr();
T *out_ptr = out.get_data_ptr();
#pragma omp parallel default(none) private(k) shared(in_ptr, out_ptr, num, len, in, out, crop_offset)
{
std::vector<size_t> ind;
#pragma omp for
for (k = 0; k < (long long)num; k++){
ind = out.calculate_index(k*len);
for (unsigned int d = 0; d < D; d++){
ind[d] += crop_offset[d];
}
const T* in_ptr_curr = in_ptr + in.calculate_offset(ind);
memcpy(out_ptr + k*len, in_ptr_curr, sizeof(T)*len);
}
}
}
/**
* @param[in] crop_size
* @param[in] in input array
* Crop the input array around its center N/2; that is, the center pixel of in array is the center pixel of out array
*/
template<class T, unsigned int D> hoNDArray<T>
crop(const vector_td<size_t, D>& crop_size, const hoNDArray<T>& in)
{
// compute crop offset, preserving the center
hoNDArray<T> out;
auto crop_offset = (from_std_vector<size_t,D>(*in.get_dimensions())-crop_size)/size_t(2);
crop(crop_offset, crop_size, in, out);
return out;
}
template<class T> void
crop(size_t x, const hoNDArray<T>& in, hoNDArray<T>& out)
{
vector_td<size_t, 1> crop_size(x);
auto crop_offset = (from_std_vector<size_t,1>(*in.get_dimensions())-crop_size)/size_t(2);
crop(crop_offset, crop_size, in, out);
}
template<class T> void
crop(size_t x, size_t y, const hoNDArray<T>& in, hoNDArray<T>& out)
{
vector_td<size_t, 2> crop_size(x, y);
auto crop_offset = (from_std_vector<size_t,2>(*in.get_dimensions())-crop_size)/size_t(2);
crop(crop_offset,crop_size, in, out);
}
template<class T> void
crop(size_t x, size_t y, size_t z, const hoNDArray<T>& in, hoNDArray<T>& out)
{
vector_td<size_t, 3> crop_size(x, y, z);
auto crop_offset = (from_std_vector<size_t,3>(*in.get_dimensions())-crop_size)/size_t(2);
crop(crop_offset, crop_size, in, out);
}
template<class T, unsigned int D> hoNDArray<T>
crop( const vector_td<size_t, D>& crop_offset, const vector_td<size_t, D>& crop_size, const hoNDArray<T>& in )
{
auto out = hoNDArray<T>();
crop(crop_offset, crop_size, in, out);
return out;
}
/**
* @param[in] offset_src starting position in src array
* @param[in] size Size of subarray to be replaced
* @param[in] src Src array to read in replaced content
* @param[in] offset_dst starting position in dst array
* @param[out] dst array to be replaced; other part outside the offset+size region will be unchanged
*/
template<class T, unsigned int D> void
fill(const vector_td<size_t, D>& offset_src, const vector_td<size_t, D>& size, hoNDArray<T> *src, const vector_td<size_t, D>& offset_dst, hoNDArray<T> *dst)
{
if (src == 0x0) {
throw std::runtime_error("replace: 0x0 src array provided");;
}
if (src->get_number_of_dimensions() < D)
{
std::stringstream ss;
ss << "fill: number of src image dimensions should be at least " << D;
throw std::runtime_error(ss.str());
}
if (dst == 0x0)
{
throw std::runtime_error("replace: 0x0 dst array provided");;
}
if (dst->get_number_of_dimensions() < D)
{
std::stringstream ss;
ss << "fill: number of dst image dimensions should be at least " << D;
throw std::runtime_error(ss.str());
}
if (src->get_number_of_dimensions() != dst->get_number_of_dimensions())
{
std::stringstream ss;
ss << "fill: src and dst array have different number of dimensions " << D;
throw std::runtime_error(ss.str());
}
std::vector<size_t> src_dim;
src->get_dimensions(src_dim);
std::vector<size_t> dst_dim;
dst->get_dimensions(dst_dim);
size_t d;
for (d = 0; d < D; d++)
{
if (src_dim[d] < offset_src[d]+size[d]-1)
{
throw std::runtime_error("fill: src array is too small for provided offset and size");;
}
if (dst_dim[d] < offset_dst[d] + size[d] - 1)
{
throw std::runtime_error("fill: dst array is too small for provided offset and size");;
}
}
size_t len = size[0];
size_t num = 1;
for (d = 1; d < D; d++) num *= size[d];
long long k;
T *src_ptr = src->get_data_ptr();
T *dst_ptr = dst->get_data_ptr();
std::vector<size_t> size_dim = to_std_vector(size);
hoNDArray<T> array_size;
array_size.create(size_dim, src->begin());
{
std::vector<size_t> ind_src = src->calculate_index(0);
std::vector<size_t> ind_dst = dst->calculate_index(0);
std::vector<size_t> ind_size(D, 0);
for (k = 0; k < (long long)num; k++)
{
ind_size = array_size.calculate_index(k*len);
for (unsigned int d = 0; d < D; d++)
{
ind_src[d] = offset_src[d] + ind_size[d];
ind_dst[d] = offset_dst[d] + ind_size[d];
}
T* src_ptr_curr = src_ptr + src->calculate_offset(ind_src);
T* dst_ptr_curr = dst_ptr + dst->calculate_offset(ind_dst);
memcpy(dst_ptr_curr, src_ptr_curr, sizeof(T)*len);
}
}
}
template<class T, unsigned int D> void
fill(const vector_td<size_t, D>& offset_src, hoNDArray<T>& src, const vector_td<size_t, D>& offset_dst, hoNDArray<T>& dst)
{
std::vector<size_t> dim;
src.get_dimensions(dim);
vector_td<size_t, D> size;
if (dim.size() < D)
{
std::stringstream ss;
ss << "fill: number of src image dimensions should be at least " << D;
throw std::runtime_error(ss.str());
}
size_t d;
for (d = 0; d < D; d++) size[d] = dim[d];
Gadgetron::fill(offset_src, size, &src, offset_dst, &dst);
}
template<class T, unsigned int D> void
fill(hoNDArray<T>& src, const vector_td<size_t, D>& offset_dst, hoNDArray<T>& dst)
{
std::vector<size_t> dim;
src.get_dimensions(dim);
vector_td<size_t, D> offset_src, size;
if (dim.size() < D)
{
std::stringstream ss;
ss << "fill: number of src image dimensions should be at least " << D;
throw std::runtime_error(ss.str());
}
size_t d;
for (d = 0; d < D; d++)
{
offset_src[d] = 0;
size[d] = dim[d];
}
Gadgetron::fill(offset_src, size, &src, offset_dst, &dst);
}
/**
* @param[in] size Size of the output array
* @param[in] in Input array
* @param[out] out Output array after padding
* @param[in] preset_out_with_val if true, out array will be filled with val before padding
* @param[in] val Value to use for padding
* The padding operations keep the center of array unchanged, e.g. the center is always N/2
*/
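// Illustrative example: padding a 1-D array of size 4 to size 8 places the
// input at offset 8/2 - 4/2 = 2, so the original elements occupy indices
// 2..5 and the remaining positions hold val.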
template<class T, unsigned int D> void
pad(const typename uint64d<D>::Type& size, const hoNDArray<T>& in, hoNDArray<T>& out, bool preset_out_with_val = true, T val = T(0))
{
if (in.get_number_of_dimensions() < D){
std::stringstream ss;
ss << "pad: number of image dimensions should be at least " << D;
throw std::runtime_error(ss.str());
}
unsigned int d;
std::vector<size_t> dims = to_std_vector(size);
for (d = D; d<in.get_number_of_dimensions(); d++){
dims.push_back(in.get_size(d));
}
if (!out.dimensions_equal(&dims)){
out.create(dims);
}
if (in.dimensions_equal(&dims)){
memcpy(out.begin(), in.begin(), in.get_number_of_bytes());
return;
}
const T *in_ptr = in.get_data_ptr();
T *out_ptr = out.get_data_ptr();
if (preset_out_with_val){
if (val == T(0)){
memset(out_ptr, 0, out.get_number_of_bytes());
}
else{
size_t N = out.get_number_of_elements();
long long n;
#pragma omp parallel for default(none) private(n) shared(N, out_ptr, val)
for (n = 0; n<(long long)N; n++)
{
out_ptr[n] = val;
}
}
}
typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t, D>(*in.get_dimensions());
typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t, D>(*out.get_dimensions());
if (weak_greater(matrix_size_in, matrix_size_out)){
throw std::runtime_error("pad: size mismatch, cannot expand");
}
typename uint64d<D>::Type offset(D);
for (d = 0; d<D; d++){
offset[d] = matrix_size_out[d]/2 - matrix_size_in[d]/2;
}
size_t len = in.get_size(0);
size_t num = in.get_number_of_elements() / len;
long long k;
#pragma omp parallel default(none) private(k, d) shared(in_ptr, out_ptr, num, len, in, out, offset)
{
std::vector<size_t> ind;
#pragma omp for
for (k = 0; k < (long long)num; k++){
ind = in.calculate_index(k*len);
for (d = 0; d < D; d++){
ind[d] += offset[d];
}
T* out_ptr_curr = out_ptr + out.calculate_offset(ind);
memcpy(out_ptr_curr, in_ptr + k*len, sizeof(T)*len);
}
}
}
template<class T, unsigned int D> void pad(const hoNDArray<T>& in, hoNDArray<T>& out, T val = T(0)){
vector_td<size_t,D> dims = from_std_vector<size_t,D>(*out.get_dimensions());
pad<T,D>(dims,in,out,true, val);
}
template<class T> void
pad(size_t x, const hoNDArray<T>& in, hoNDArray<T>& out, bool preset_out_with_val = true, T val = T(0))
{
typename uint64d<1>::Type padSize(x);
pad<T, 1>(padSize, in, out, preset_out_with_val, val);
}
template<class T> void
pad(size_t x, size_t y, const hoNDArray<T>& in, hoNDArray<T>& out, bool preset_out_with_val = true, T val = T(0))
{
typename uint64d<2>::Type padSize(x, y);
pad<T, 2>(padSize, in, out, preset_out_with_val, val);
}
template<class T> void
pad(size_t x, size_t y, size_t z, const hoNDArray<T> &in, hoNDArray<T>& out, bool preset_out_with_val = true, T val = T(0))
{
typename uint64d<3>::Type padSize(x, y, z);
pad<T, 3>(padSize, in, out, preset_out_with_val, val);
}
/**
* @param[in] size Size of the output array
* @param[in] in Input array
* @param[in] val Value to use for padding
* @returns New array of the specified size, containing the original input array in the center and val outside.
*/
template<class T, unsigned int D> hoNDArray<T>
pad(const typename uint64d<D>::Type& size, const hoNDArray<T> & in, T val = T(0))
{
auto out = hoNDArray<T>();
pad<T,D>(size, in, out, true, val);
return out;
}
/// copy the sub array x(:, indLastDim) to all other places of the last dimension
template<typename T>
bool repmatLastDimension(hoNDArray<T>& x, size_t indLastDim)
{
try
{
size_t NDim = x.get_number_of_dimensions();
size_t lastDim = x.get_size(NDim-1);
GADGET_CHECK_RETURN_FALSE( indLastDim < lastDim );
std::vector<size_t> ind(NDim, 0);
ind[NDim-1] = indLastDim;
size_t offsetIndLastDim = x.calculate_offset(ind);
size_t N = x.get_number_of_elements() / lastDim;
long long l;
#pragma omp parallel default(none) private(l) shared(lastDim, offsetIndLastDim, x, ind, indLastDim, N, NDim)
{
std::vector<size_t> indLocal(ind);
#pragma omp for
for ( l=0; l<(long long)lastDim; l++ )
{
if ( l==indLastDim ) continue;
indLocal[NDim-1] = l;
size_t offsetInd = x.calculate_offset(indLocal);
memcpy(x.begin()+offsetInd, x.begin()+offsetIndLastDim, sizeof(T)*N);
}
}
}
catch (...)
{
GERROR_STREAM("Errors in repmatLastDimension(hoNDArray<T>& x, size_t indLastDim) ... ");
return false;
}
return true;
}
// Utility to check if all neighbors required for the linear interpolation exists
// ... do not include dimensions of size 1
template<class REAL, unsigned int D> inline bool
is_border_pixel( vector_td<size_t,D> co, vector_td<size_t,D> dims )
{
for( size_t dim=0; dim<D; dim++ ){
if( dims[dim] > 1 && ( co[dim] == 0 || co[dim] == (dims[dim]-1) ) )
return true;
}
return false;
}
// Downsample
template<class REAL, unsigned int D>
hoNDArray<REAL> downsample(const hoNDArray<REAL>& _in )
{
// A few sanity checks
if( _in.get_number_of_dimensions() < D ){
throw std::runtime_error( "downsample(): the number of array dimensions should be at least D");
}
for( size_t d=0; d<D; d++ ){
if( (_in.get_size(d)%2) == 1 && _in.get_size(d) != 1 ){
throw std::runtime_error( "downsample(): uneven array dimensions larger than one not accepted");
}
}
typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *_in.get_dimensions() );
typename uint64d<D>::Type matrix_size_out = matrix_size_in >> 1;
for( size_t d=0; d<D; d++ ){
if( matrix_size_out[d] == 0 )
matrix_size_out[d] = 1;
}
size_t num_elements = prod(matrix_size_out);
size_t num_batches = 1;
for( size_t d=D; d<_in.get_number_of_dimensions(); d++ ){
num_batches *= _in.get_size(d);
}
std::vector<size_t> dims = to_std_vector(matrix_size_out);
for( size_t d=D; d<_in.get_number_of_dimensions(); d++ ){
dims.push_back(_in.get_size(d));
}
const REAL *in = _in.get_data_ptr();
hoNDArray<REAL> _out( dims );
REAL *out = _out.get_data_ptr();
typedef vector_td<size_t,D> uint64d;
#ifdef USE_OMP
#pragma omp parallel for
#endif
for( int64_t idx=0; idx < (int64_t)(num_elements*num_batches); idx++ ){
const size_t frame_offset = idx/num_elements;
const uint64d co_out = idx_to_co<uint64_t,D>( idx-frame_offset*num_elements, matrix_size_out );
const uint64d co_in = co_out << 1;
const uint64d twos(2);
const size_t num_adds = 1 << D;
size_t actual_adds = 0;
REAL res = REAL(0);
for( size_t i=0; i<num_adds; i++ ){
const uint64d local_co = idx_to_co( i, twos );
if( weak_greater_equal( local_co, matrix_size_out ) ) continue; // To allow array dimensions of size 1
const size_t in_idx = co_to_idx(co_in+local_co, matrix_size_in)+frame_offset*prod(matrix_size_in);
actual_adds++;
res += in[in_idx];
}
out[idx] = res/REAL(actual_adds);
}
return _out;
}
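// Illustrative note: each output sample is the mean of its (up to) 2^D
// input neighbors; in 2-D the output pixel (x,y) averages input pixels
// (2x,2y), (2x+1,2y), (2x,2y+1) and (2x+1,2y+1), while dimensions of
// size 1 are passed through unchanged.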
namespace {
template<class T> hoNDArray<T> upsample_along_dimension(const hoNDArray<T>& array,int dim){
auto new_dims = *array.get_dimensions();
auto old_dim = new_dims[dim];
new_dims[dim] *= 2;
hoNDArray<T> result(new_dims);
size_t stride = std::accumulate(new_dims.begin(),new_dims.begin()+dim,size_t(1),std::multiplies<size_t>());
size_t nbatches = result.get_number_of_elements()/stride/new_dims[dim];
size_t batch_size = stride*new_dims[dim];
size_t old_batch_size = batch_size/2;
#pragma omp parallel for
for (int batch = 0; batch < (int)nbatches; batch++){
T* result_ptr = result.get_data_ptr()+batch_size*batch;
const T* input_ptr = array.get_data_ptr()+batch*old_batch_size;
for (size_t i = 0; i < old_dim-1; i++){
for (size_t k = 0; k < stride; k++){
result_ptr[2*i*stride+k] = input_ptr[i*stride+k];
result_ptr[(2*i+1)*stride+k] = (input_ptr[i*stride+k]+input_ptr[(i+1)*stride+k])/2; // midpoint of neighbors (linear interpolation)
}
}
size_t i = old_dim-1;
for (size_t k = 0; k < stride; k++){
result_ptr[2*i*stride+k] = input_ptr[i*stride+k];
result_ptr[(2*i+1)*stride+k] = input_ptr[i*stride+k];
}
}
return result;
}
template<class T> hoNDArray<T> upsample_spline_along_dimension(const hoNDArray<T>& array,int dim,int scale){
namespace ba = boost::adaptors;
namespace bm = boost::math;
auto new_dims = *array.get_dimensions();
auto old_dim = new_dims[dim];
new_dims[dim] *= 2;
hoNDArray<T> result(new_dims);
size_t stride = std::accumulate(new_dims.begin(),new_dims.begin()+dim,size_t(1),std::multiplies<size_t>());
size_t nbatches = result.get_number_of_elements()/stride/new_dims[dim];
size_t batch_size = stride*new_dims[dim];
size_t old_batch_size = batch_size/2;
#pragma omp parallel for
for (int batch = 0; batch < (int)nbatches; batch++){
T* result_ptr = result.get_data_ptr()+batch_size*batch;
const T* input_ptr = array.get_data_ptr()+batch*old_batch_size;
for (size_t k = 0; k < stride; k++){
auto strided_iterator = std::make_pair(input_ptr+k,input_ptr+k+old_batch_size) | ba::strided(stride);
auto spline = bm::interpolators::cardinal_cubic_b_spline(
boost::begin(strided_iterator),
boost::end(strided_iterator),
T(0.25)*scale, T(scale), T(0), T(0)
);
for (int i = 0; i < (int)new_dims[dim]; i++){
result_ptr[k+i*stride] = spline(i);
}
}
}
return result;
}
}
// Linear interpolation upsampling
template<class T, unsigned int D> hoNDArray<T>
upsample( const hoNDArray<T>& in )
{
if( in.get_number_of_dimensions() < D ){
throw std::runtime_error( "upsample(): the number of array dimensions should be at least D");
}
hoNDArray<T> result = in;
for (int i = D-1; i >= 0; i--){
result = upsample_along_dimension<T>(result,i);
}
return result;
}
template<class T, unsigned int D> hoNDArray<T>
upsample_spline( const hoNDArray<T>& in, int scale = 2 )
{
if( in.get_number_of_dimensions() < D ){
throw std::runtime_error( "upsample(): the number of array dimensions should be at least D");
}
hoNDArray<T> result = in;
for (int i = D-1; i >= 0; i--){
result = upsample_spline_along_dimension<T>(result,i,scale);
}
return result;
}
// Nearest neighbor upsampling
template<class T, unsigned int D> hoNDArray<T>
upsample_nearest( const hoNDArray<T>& in )
{
// A few sanity checks
if( in.get_number_of_dimensions() < D ){
throw std::runtime_error( "upsample(): the number of array dimensions should be at least D");
}
typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *in.get_dimensions() );
typename uint64d<D>::Type matrix_size_out = matrix_size_in << 1;
for( size_t d=0; d<D; d++ ){
if( matrix_size_in[d] == 1 )
matrix_size_out[d] = 1;
}
size_t num_elements = prod(matrix_size_out);
size_t num_batches = 1;
for( size_t d=D; d<in.get_number_of_dimensions(); d++ ){
num_batches *= in.get_size(d);
}
std::vector<size_t> dims = to_std_vector(matrix_size_out);
for( size_t d=D; d<in.get_number_of_dimensions(); d++ ){
dims.push_back(in.get_size(d));
}
const T *in_ptr = in.get_data_ptr();
hoNDArray<T> out(&dims);
T *out_ptr = out.get_data_ptr();
typedef vector_td<size_t,D> uint64d;
#ifdef USE_OMP
#pragma omp parallel for
#endif
for( long long idx=0; idx < (long long)(num_elements*num_batches); idx++ ){
const size_t frame_idx = idx/num_elements;
const uint64d co_out = idx_to_co<D>( idx-frame_idx*num_elements, matrix_size_out );
uint64d co_in = co_out/uint64_t(2);
const size_t in_idx = co_to_idx<D>(co_in, matrix_size_in)+frame_idx*prod(matrix_size_in);
out_ptr[idx] = in_ptr[in_idx];
}
return out;
}
template<class T> hoNDArray<T> repeat(const hoNDArray<T>& array,unsigned int repeats){
auto dims = array.dimensions();
dims.push_back(repeats);
hoNDArray<T> output(dims);
for (auto span : spans(output, array.get_number_of_dimensions())) {
span = array;
}
return output;
}
/**
* This functions takes a collection of hoNDArrays and concatenates them along the specified dimension
* @tparam COLL Collection of hoNDArray such as std::vector<hoNDArray<float>>.
* @param arrays The hoNDArrays. Must be of equal size, except along the concat dimension
* @param dimension Dimension along which to concatenate
* @return The concatenated arrays.
*/
template <class COLL> auto concat_along_dimension(const COLL& arrays, size_t dimension) {
using namespace ranges;
using T = std::decay_t<decltype(*std::begin(*std::begin(arrays)))>;
if (arrays.empty())
return hoNDArray<T>();
const hoNDArray<T>& first = *std::begin(arrays);
std::vector dims = first.dimensions();
size_t count = ranges::accumulate(arrays | views::transform([dimension](const auto& array) {
return array.dimensions().at(dimension);
}),
size_t(0));
dims[dimension] = count;
auto dimensions_valid = [&dims,dimension](const auto& array) {
bool result = true;
const auto& d = array.dimensions();
for (size_t i = 0; i < d.size(); i++) {
if (i == dimension)
continue;
result &= d[i] == dims[i];
}
return result && (d.size() == dims.size());
};
bool all_dimensions_valid = ranges::accumulate(arrays | views::transform(dimensions_valid), true, std::logical_and() );
if (!all_dimensions_valid) throw std::runtime_error("The dimensions of all provided arrays must be equal except along the concatenate dimension");
auto result = hoNDArray<T>(dims);
const size_t inner_stride = ranges::accumulate(dims | views::slice(size_t(0), dimension),
size_t(1), std::multiplies());
const size_t outer_stride = inner_stride * count;
size_t current_slice = 0;
for (const auto& array : arrays) {
size_t slice_count = array.dimensions()[dimension];
auto array_inner_stride = slice_count * inner_stride;
auto repetitions = array.size() / array_inner_stride;
for (size_t i = 0; i < repetitions; i++) {
std::copy_n(array.begin() + i * array_inner_stride, array_inner_stride,
result.begin() + current_slice * inner_stride + outer_stride * i);
}
current_slice += slice_count;
}
return result;
}
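// Usage sketch (hypothetical shapes): concatenating arrays of dimensions
// {2,3} and {2,5} along dimension 1 yields a {2,8} array. Here
// inner_stride is 2 (the product of the dimensions before the concat
// dimension) and each input contributes slice_count*inner_stride
// contiguous elements per repetition.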
template<class COLL>
auto concat(const COLL &arrays) {
using T = std::decay_t<decltype(*std::begin(*std::begin(arrays)))>;
if (arrays.empty()) return hoNDArray<T>();
const hoNDArray<T> &first = *std::begin(arrays);
auto dims = first.dimensions();
auto size = first.size();
using std::begin;
using std::end;
if (!std::all_of(begin(arrays), end(arrays), [&](const auto &array) { return dims == array.dimensions(); }) ||
!std::all_of(begin(arrays), end(arrays), [&](const auto &array) { return size == array.size(); })) {
throw std::runtime_error("Array size or dimensions do not match.");
}
dims.push_back(arrays.size());
hoNDArray<T> output(dims);
auto output_iterator = spans(output, first.get_number_of_dimensions()).begin();
for (const auto& array : arrays) {
*output_iterator = array;
++output_iterator;
}
return output;
}
template<class T, class... ARRAYS>
hoNDArray<T> concat(const hoNDArray<T>& first_array, const ARRAYS& ... arrays){
static_assert((std::is_same_v<hoNDArray<T>,std::decay_t<ARRAYS>> && ...));
using namespace ranges;
return concat(views::concat(views::single(first_array),views::single(arrays)...));
}
}
|
GB_binop__div_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__div_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__div_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__div_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__div_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__div_uint64)
// A*D function (colscale): GB (_AxD__div_uint64)
// D*A function (rowscale): GB (_DxB__div_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__div_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__div_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__div_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__div_uint64)
// C=scalar+B GB (_bind1st__div_uint64)
// C=scalar+B' GB (_bind1st_tran__div_uint64)
// C=A+scalar GB (_bind2nd__div_uint64)
// C=A'+scalar GB (_bind2nd_tran__div_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = GB_IDIV_UNSIGNED (aij, bij, 64)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IDIV_UNSIGNED (x, y, 64) ;
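// Note: GB_IDIV_UNSIGNED guards division by zero so cij is defined for all
// inputs; per the SuiteSparse:GraphBLAS integer-division convention, unsigned
// x/0 maps to the largest uint64_t for x != 0 (and 0/0 to 0).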
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_UINT64 || GxB_NO_DIV_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__div_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__div_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__div_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__div_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__div_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (x, bij, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__div_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IDIV_UNSIGNED (aij, y, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (x, aij, 64) ; \
}
GrB_Info GB (_bind1st_tran__div_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, y, 64) ; \
}
GrB_Info GB (_bind2nd_tran__div_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
nvector_openmp.c | /* -----------------------------------------------------------------
* Programmer(s): David J. Gardner and Carol S. Woodward @ LLNL
* -----------------------------------------------------------------
* Acknowledgements: This NVECTOR module is based on the NVECTOR
* Serial module by Scott D. Cohen, Alan C.
* Hindmarsh, Radu Serban, and Aaron Collier
* @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for an OpenMP implementation
* of the NVECTOR module.
* -----------------------------------------------------------------*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_math.h>
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
#define ONE RCONST(1.0)
#define ONEPT5 RCONST(1.5)
/* Private functions for special cases of vector operations */
static void VCopy_OpenMP(N_Vector x, N_Vector z); /* z=x */
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x+y */
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z); /* z=x-y */
static void VNeg_OpenMP(N_Vector x, N_Vector z); /* z=-x */
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x+y) */
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z); /* z=c(x-y) */
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax+y */
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z); /* z=ax-y */
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y); /* y <- ax+y */
static void VScaleBy_OpenMP(realtype a, N_Vector x); /* x <- ax */
/* Private functions for special cases of vector array operations */
static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X+Y */
static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=X-Y */
static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X+Y) */
static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=c(X-Y) */
static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX+Y */
static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z); /* Z=aX-Y */
static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y); /* Y <- aX+Y */
/*
* -----------------------------------------------------------------
* exported functions
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------
* Returns vector type ID. Used to identify vector implementation
* from abstract N_Vector interface.
*/
N_Vector_ID N_VGetVectorID_OpenMP(N_Vector v)
{
return SUNDIALS_NVEC_OPENMP;
}
/* ----------------------------------------------------------------------------
* Function to create a new empty vector
*/
N_Vector N_VNewEmpty_OpenMP(sunindextype length, int num_threads)
{
N_Vector v;
N_VectorContent_OpenMP content;
/* Create vector */
v = NULL;
v = N_VNewEmpty();
if (v == NULL) return(NULL);
/* Attach operations */
/* constructors, destructors, and utility operations */
v->ops->nvgetvectorid = N_VGetVectorID_OpenMP;
v->ops->nvclone = N_VClone_OpenMP;
v->ops->nvcloneempty = N_VCloneEmpty_OpenMP;
v->ops->nvdestroy = N_VDestroy_OpenMP;
v->ops->nvspace = N_VSpace_OpenMP;
v->ops->nvgetarraypointer = N_VGetArrayPointer_OpenMP;
v->ops->nvsetarraypointer = N_VSetArrayPointer_OpenMP;
v->ops->nvgetlength = N_VGetLength_OpenMP;
/* standard vector operations */
v->ops->nvlinearsum = N_VLinearSum_OpenMP;
v->ops->nvconst = N_VConst_OpenMP;
v->ops->nvprod = N_VProd_OpenMP;
v->ops->nvdiv = N_VDiv_OpenMP;
v->ops->nvscale = N_VScale_OpenMP;
v->ops->nvabs = N_VAbs_OpenMP;
v->ops->nvinv = N_VInv_OpenMP;
v->ops->nvaddconst = N_VAddConst_OpenMP;
v->ops->nvdotprod = N_VDotProd_OpenMP;
v->ops->nvmaxnorm = N_VMaxNorm_OpenMP;
v->ops->nvwrmsnormmask = N_VWrmsNormMask_OpenMP;
v->ops->nvwrmsnorm = N_VWrmsNorm_OpenMP;
v->ops->nvmin = N_VMin_OpenMP;
v->ops->nvwl2norm = N_VWL2Norm_OpenMP;
v->ops->nvl1norm = N_VL1Norm_OpenMP;
v->ops->nvcompare = N_VCompare_OpenMP;
v->ops->nvinvtest = N_VInvTest_OpenMP;
v->ops->nvconstrmask = N_VConstrMask_OpenMP;
v->ops->nvminquotient = N_VMinQuotient_OpenMP;
/* fused and vector array operations are disabled (NULL) by default */
/* local reduction kernels */
v->ops->nvdotprodlocal = N_VDotProd_OpenMP;
v->ops->nvmaxnormlocal = N_VMaxNorm_OpenMP;
v->ops->nvminlocal = N_VMin_OpenMP;
v->ops->nvl1normlocal = N_VL1Norm_OpenMP;
v->ops->nvinvtestlocal = N_VInvTest_OpenMP;
v->ops->nvconstrmasklocal = N_VConstrMask_OpenMP;
v->ops->nvminquotientlocal = N_VMinQuotient_OpenMP;
v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_OpenMP;
v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_OpenMP;
/* Create content */
content = NULL;
content = (N_VectorContent_OpenMP) malloc(sizeof *content);
if (content == NULL) { N_VDestroy(v); return(NULL); }
/* Attach content */
v->content = content;
/* Initialize content */
content->length = length;
content->num_threads = num_threads;
content->own_data = SUNFALSE;
content->data = NULL;
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create a new vector
*/
N_Vector N_VNew_OpenMP(sunindextype length, int num_threads)
{
N_Vector v;
realtype *data;
v = NULL;
v = N_VNewEmpty_OpenMP(length, num_threads);
if (v == NULL) return(NULL);
/* Create data */
if (length > 0) {
/* Allocate memory */
data = NULL;
data = (realtype *) malloc(length * sizeof(realtype));
if(data == NULL) { N_VDestroy_OpenMP(v); return(NULL); }
/* Attach data */
NV_OWN_DATA_OMP(v) = SUNTRUE;
NV_DATA_OMP(v) = data;
}
return(v);
}
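/* Illustrative usage sketch (not part of this file): create a length-n vector
 * backed by nthreads OpenMP threads, fill it with ones, and release it;
 * a NULL return from the constructor signals allocation failure.
 *
 *   N_Vector v = N_VNew_OpenMP(n, nthreads);
 *   if (v == NULL) return;
 *   N_VConst_OpenMP(ONE, v);
 *   N_VDestroy_OpenMP(v);
 */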
/* ----------------------------------------------------------------------------
* Function to create a vector with user data component
*/
N_Vector N_VMake_OpenMP(sunindextype length, realtype *v_data, int num_threads)
{
N_Vector v;
v = NULL;
v = N_VNewEmpty_OpenMP(length, num_threads);
if (v == NULL) return(NULL);
if (length > 0) {
/* Attach data */
NV_OWN_DATA_OMP(v) = SUNFALSE;
NV_DATA_OMP(v) = v_data;
}
return(v);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors.
*/
N_Vector* N_VCloneVectorArray_OpenMP(int count, N_Vector w)
{
N_Vector* vs;
int j;
if (count <= 0) return(NULL);
vs = NULL;
vs = (N_Vector*) malloc(count * sizeof(N_Vector));
if(vs == NULL) return(NULL);
for (j = 0; j < count; j++) {
vs[j] = NULL;
vs[j] = N_VClone_OpenMP(w);
if (vs[j] == NULL) {
N_VDestroyVectorArray_OpenMP(vs, j); /* free the j vectors already created */
return(NULL);
}
}
return(vs);
}
/* ----------------------------------------------------------------------------
* Function to create an array of new vectors with NULL data array.
*/
N_Vector* N_VCloneVectorArrayEmpty_OpenMP(int count, N_Vector w)
{
N_Vector* vs;
int j;
if (count <= 0) return(NULL);
vs = NULL;
vs = (N_Vector*) malloc(count * sizeof(N_Vector));
if(vs == NULL) return(NULL);
for (j = 0; j < count; j++) {
vs[j] = NULL;
vs[j] = N_VCloneEmpty_OpenMP(w);
if (vs[j] == NULL) {
N_VDestroyVectorArray_OpenMP(vs, j); /* free the j vectors already created */
return(NULL);
}
}
return(vs);
}
/* ----------------------------------------------------------------------------
* Function to free an array created with N_VCloneVectorArray_OpenMP
*/
void N_VDestroyVectorArray_OpenMP(N_Vector* vs, int count)
{
int j;
for (j = 0; j < count; j++) N_VDestroy_OpenMP(vs[j]);
free(vs); vs = NULL;
return;
}
/* ----------------------------------------------------------------------------
* Function to return number of vector elements
*/
sunindextype N_VGetLength_OpenMP(N_Vector v)
{
return NV_LENGTH_OMP(v);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to stdout
*/
void N_VPrint_OpenMP(N_Vector x)
{
N_VPrintFile_OpenMP(x, stdout);
}
/* ----------------------------------------------------------------------------
* Function to print a vector to outfile
*/
void N_VPrintFile_OpenMP(N_Vector x, FILE *outfile)
{
sunindextype i, N;
realtype *xd;
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
for (i = 0; i < N; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
STAN_SUNDIALS_FPRINTF(outfile, "%11.8Lg\n", xd[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]);
#else
STAN_SUNDIALS_FPRINTF(outfile, "%11.8g\n", xd[i]);
#endif
}
STAN_SUNDIALS_FPRINTF(outfile, "\n");
return;
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Create new vector from existing vector without attaching data
*/
N_Vector N_VCloneEmpty_OpenMP(N_Vector w)
{
N_Vector v;
N_VectorContent_OpenMP content;
if (w == NULL) return(NULL);
/* Create vector */
v = NULL;
v = N_VNewEmpty();
if (v == NULL) return(NULL);
/* Attach operations */
if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); }
/* Create content */
content = NULL;
content = (N_VectorContent_OpenMP) malloc(sizeof *content);
if (content == NULL) { N_VDestroy(v); return(NULL); }
/* Attach content */
v->content = content;
/* Initialize content */
content->length = NV_LENGTH_OMP(w);
content->num_threads = NV_NUM_THREADS_OMP(w);
content->own_data = SUNFALSE;
content->data = NULL;
return(v);
}
/* ----------------------------------------------------------------------------
* Create new vector from existing vector and attach data
*/
N_Vector N_VClone_OpenMP(N_Vector w)
{
N_Vector v;
realtype *data;
sunindextype length;
v = NULL;
v = N_VCloneEmpty_OpenMP(w);
if (v == NULL) return(NULL);
length = NV_LENGTH_OMP(w);
/* Create data */
if (length > 0) {
/* Allocate memory */
data = NULL;
data = (realtype *) malloc(length * sizeof(realtype));
if(data == NULL) { N_VDestroy_OpenMP(v); return(NULL); }
/* Attach data */
NV_OWN_DATA_OMP(v) = SUNTRUE;
NV_DATA_OMP(v) = data;
}
return(v);
}
/* ----------------------------------------------------------------------------
* Destroy vector and free vector memory
*/
void N_VDestroy_OpenMP(N_Vector v)
{
if (v == NULL) return;
/* free content */
if (v->content != NULL) {
/* free data array if it's owned by the vector */
if (NV_OWN_DATA_OMP(v) && NV_DATA_OMP(v) != NULL) {
free(NV_DATA_OMP(v));
NV_DATA_OMP(v) = NULL;
}
free(v->content);
v->content = NULL;
}
/* free ops and vector */
if (v->ops != NULL) { free(v->ops); v->ops = NULL; }
free(v); v = NULL;
return;
}
/* ----------------------------------------------------------------------------
* Get storage requirement for N_Vector
*/
void N_VSpace_OpenMP(N_Vector v, sunindextype *lrw, sunindextype *liw)
{
*lrw = NV_LENGTH_OMP(v);
*liw = 1;
return;
}
/* ----------------------------------------------------------------------------
* Get vector data pointer
*/
realtype *N_VGetArrayPointer_OpenMP(N_Vector v)
{
return((realtype *) NV_DATA_OMP(v));
}
/* ----------------------------------------------------------------------------
* Set vector data pointer
*/
void N_VSetArrayPointer_OpenMP(realtype *v_data, N_Vector v)
{
if (NV_LENGTH_OMP(v) > 0) NV_DATA_OMP(v) = v_data;
return;
}
/* ----------------------------------------------------------------------------
* Compute linear combination z[i] = a*x[i]+b*y[i]
*/
void N_VLinearSum_OpenMP(realtype a, N_Vector x, realtype b, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype c, *xd, *yd, *zd;
N_Vector v1, v2;
booleantype test;
i = 0; /* initialize to suppress clang warning */
xd = yd = zd = NULL;
if ((b == ONE) && (z == y)) { /* BLAS usage: axpy y <- ax+y */
Vaxpy_OpenMP(a,x,y);
return;
}
if ((a == ONE) && (z == x)) { /* BLAS usage: axpy x <- by+x */
Vaxpy_OpenMP(b,y,x);
return;
}
/* Case: a == b == 1.0 */
if ((a == ONE) && (b == ONE)) {
VSum_OpenMP(x, y, z);
return;
}
/* Cases: (1) a == 1.0, b = -1.0, (2) a == -1.0, b == 1.0 */
if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
v1 = test ? y : x;
v2 = test ? x : y;
VDiff_OpenMP(v2, v1, z);
return;
}
/* Cases: (1) a == 1.0, b == other or 0.0, (2) a == other or 0.0, b == 1.0 */
/* if a or b is 0.0, then user should have called N_VScale */
if ((test = (a == ONE)) || (b == ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin1_OpenMP(c, v1, v2, z);
return;
}
/* Cases: (1) a == -1.0, b != 1.0, (2) a != 1.0, b == -1.0 */
if ((test = (a == -ONE)) || (b == -ONE)) {
c = test ? b : a;
v1 = test ? y : x;
v2 = test ? x : y;
VLin2_OpenMP(c, v1, v2, z);
return;
}
/* Case: a == b */
/* catches case both a and b are 0.0 - user should have called N_VConst */
if (a == b) {
VScaleSum_OpenMP(a, x, y, z);
return;
}
/* Case: a == -b */
if (a == -b) {
VScaleDiff_OpenMP(a, x, y, z);
return;
}
/* Do all cases not handled above:
(1) a == other, b == 0.0 - user should have called N_VScale
(2) a == 0.0, b == other - user should have called N_VScale
(3) a,b == other, a !=b, a != -b */
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,a,b,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = (a*xd[i])+(b*yd[i]);
return;
}
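/* Dispatch example: N_VLinearSum_OpenMP(ONE, x, ONE, y, z) takes the
 * VSum_OpenMP fast path above; only the general a*x + b*y case with
 * unrelated coefficients reaches the final loop. */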
/* ----------------------------------------------------------------------------
* Assigns constant value to all vector elements, z[i] = c
*/
void N_VConst_OpenMP(realtype c, N_Vector z)
{
sunindextype i, N;
realtype *zd;
i = 0; /* initialize to suppress clang warning */
zd = NULL;
N = NV_LENGTH_OMP(z);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(z))
for (i = 0; i < N; i++) zd[i] = c;
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise product z[i] = x[i]*y[i]
*/
void N_VProd_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = xd[i]*yd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise division z[i] = x[i]/y[i]
*/
void N_VDiv_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = xd[i]/yd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute scalar multiplication z[i] = c*x[i]
*/
void N_VScale_OpenMP(realtype c, N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = zd = NULL;
if (z == x) { /* BLAS usage: scale x <- cx */
VScaleBy_OpenMP(c, x);
return;
}
if (c == ONE) {
VCopy_OpenMP(x, z);
} else if (c == -ONE) {
VNeg_OpenMP(x, z);
} else {
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = c*xd[i];
}
return;
}
/* ----------------------------------------------------------------------------
* Compute absolute value of vector components z[i] = SUNRabs(x[i])
*/
void N_VAbs_OpenMP(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = SUNRabs(xd[i]);
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = 1 / x[i]
*/
void N_VInv_OpenMP(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = ONE/xd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise addition of a scalar to a vector z[i] = x[i] + b
*/
void N_VAddConst_OpenMP(N_Vector x, realtype b, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,b,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = xd[i]+b;
return;
}
/* ----------------------------------------------------------------------------
* Computes the dot product of two vectors, a = sum(x[i]*y[i])
*/
realtype N_VDotProd_OpenMP(N_Vector x, N_Vector y)
{
sunindextype i, N;
realtype sum, *xd, *yd;
i = 0; /* initialize to suppress clang warning */
sum = ZERO;
xd = yd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd) \
reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
sum += xd[i]*yd[i];
}
return(sum);
}
/* ----------------------------------------------------------------------------
* Computes max norm of a vector
*/
realtype N_VMaxNorm_OpenMP(N_Vector x)
{
sunindextype i, N;
realtype tmax, max, *xd;
i = 0; /* initialize to suppress clang warning */
max = ZERO;
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
#pragma omp parallel default(none) private(i,tmax) shared(N,max,xd) \
num_threads(NV_NUM_THREADS_OMP(x))
{
tmax = ZERO;
#pragma omp for schedule(static)
for (i = 0; i < N; i++) {
if (SUNRabs(xd[i]) > tmax) tmax = SUNRabs(xd[i]);
}
#pragma omp critical
{
if (tmax > max)
max = tmax;
}
}
return(max);
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a vector
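*   wrms(x, w) = sqrt( (1/N) * sum over i of (x[i] * w[i])^2 )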
*/
realtype N_VWrmsNorm_OpenMP(N_Vector x, N_Vector w)
{
return(SUNRsqrt(N_VWSqrSumLocal_OpenMP(x, w)/(NV_LENGTH_OMP(x))));
}
/* ----------------------------------------------------------------------------
* Computes weighted root mean square norm of a masked vector
*/
realtype N_VWrmsNormMask_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
return(SUNRsqrt(N_VWSqrSumMaskLocal_OpenMP(x, w, id)/(NV_LENGTH_OMP(x))));
}
/* ----------------------------------------------------------------------------
* Finds the minimum component of a vector
*/
realtype N_VMin_OpenMP(N_Vector x)
{
sunindextype i, N;
realtype min, *xd;
realtype tmin;
i = 0; /* initialize to suppress clang warning */
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
min = xd[0];
#pragma omp parallel default(none) private(i,tmin) shared(N,min,xd) \
num_threads(NV_NUM_THREADS_OMP(x))
{
tmin = xd[0];
#pragma omp for schedule(static)
for (i = 1; i < N; i++) {
if (xd[i] < tmin) tmin = xd[i];
}
if (tmin < min) {
#pragma omp critical
{
if (tmin < min) min = tmin;
}
}
}
return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted L2 norm of a vector
*/
realtype N_VWL2Norm_OpenMP(N_Vector x, N_Vector w)
{
sunindextype i, N;
realtype sum, *xd, *wd;
i = 0; /* initialize to suppress clang warning */
sum = ZERO;
xd = wd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
wd = NV_DATA_OMP(w);
#pragma omp parallel for default(none) private(i) shared(N,xd,wd) \
reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
sum += SUNSQR(xd[i]*wd[i]);
}
return(SUNRsqrt(sum));
}
/* ----------------------------------------------------------------------------
* Computes L1 norm of a vector
*/
realtype N_VL1Norm_OpenMP(N_Vector x)
{
sunindextype i, N;
realtype sum, *xd;
i = 0; /* initialize to suppress clang warning */
sum = ZERO;
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
#pragma omp parallel for default(none) private(i) shared(N,xd) \
reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i<N; i++)
sum += SUNRabs(xd[i]);
return(sum);
}
/* ----------------------------------------------------------------------------
* Compare vector component values to a scalar
*/
void N_VCompare_OpenMP(realtype c, N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
zd[i] = (SUNRabs(xd[i]) >= c) ? ONE : ZERO;
}
return;
}
/* ----------------------------------------------------------------------------
* Compute componentwise inverse z[i] = ONE/x[i] and check whether any x[i] == ZERO
*/
booleantype N_VInvTest_OpenMP(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd, val;
i = 0; /* initialize to suppress clang warning */
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
val = ZERO;
#pragma omp parallel for default(none) private(i) shared(N,val,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
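/* every thread that sees a zero entry writes the same value ONE to val,
   so this unsynchronized update is a benign race */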
if (xd[i] == ZERO)
val = ONE;
else
zd[i] = ONE/xd[i];
}
if (val > ZERO)
return (SUNFALSE);
else
return (SUNTRUE);
}
/* ----------------------------------------------------------------------------
* Compute constraint mask of a vector
*/
booleantype N_VConstrMask_OpenMP(N_Vector c, N_Vector x, N_Vector m)
{
sunindextype i, N;
realtype temp;
realtype *cd, *xd, *md;
booleantype test;
i = 0; /* initialize to suppress clang warning */
cd = xd = md = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
cd = NV_DATA_OMP(c);
md = NV_DATA_OMP(m);
temp = ZERO;
#pragma omp parallel for default(none) private(i,test) shared(N,xd,cd,md,temp) \
schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
md[i] = ZERO;
/* Continue if no constraints were set for the variable */
if (cd[i] == ZERO)
continue;
/* Check if a set constraint has been violated */
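/* Constraint encoding: cd[i] = 2 or -2 requires xd[i] strictly positive
   or negative; cd[i] = 1 or -1 requires xd[i] non-negative or non-positive */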
test = (SUNRabs(cd[i]) > ONEPT5 && xd[i]*cd[i] <= ZERO) ||
(SUNRabs(cd[i]) > HALF && xd[i]*cd[i] < ZERO);
if (test) {
temp = md[i] = ONE; /* Here is a race to write to temp */
}
}
/* Return false if any constraint was violated */
return (temp == ONE) ? SUNFALSE : SUNTRUE;
}
/* ----------------------------------------------------------------------------
* Compute minimum componentwise quotient
*/
realtype N_VMinQuotient_OpenMP(N_Vector num, N_Vector denom)
{
sunindextype i, N;
realtype *nd, *dd, min, tmin, val;
i = 0; /* initialize to suppress clang warning */
nd = dd = NULL;
N = NV_LENGTH_OMP(num);
nd = NV_DATA_OMP(num);
dd = NV_DATA_OMP(denom);
min = BIG_REAL;
#pragma omp parallel default(none) private(i,tmin,val) shared(N,min,nd,dd) \
num_threads(NV_NUM_THREADS_OMP(num))
{
tmin = BIG_REAL;
#pragma omp for schedule(static)
for (i = 0; i < N; i++) {
if (dd[i] != ZERO) {
val = nd[i]/dd[i];
if (val < tmin) tmin = val;
}
}
if (tmin < min) {
#pragma omp critical
{
if (tmin < min) min = tmin;
}
}
}
return(min);
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a vector
*/
realtype N_VWSqrSumLocal_OpenMP(N_Vector x, N_Vector w)
{
sunindextype i, N;
realtype sum, *xd, *wd;
i = 0; /* initialize to suppress clang warning */
sum = ZERO;
xd = wd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
wd = NV_DATA_OMP(w);
#pragma omp parallel for default(none) private(i) shared(N,xd,wd) \
reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
sum += SUNSQR(xd[i]*wd[i]);
}
return(sum);
}
/* ----------------------------------------------------------------------------
* Computes weighted square sum of a masked vector
*/
realtype N_VWSqrSumMaskLocal_OpenMP(N_Vector x, N_Vector w, N_Vector id)
{
sunindextype i, N;
realtype sum, *xd, *wd, *idd;
i = 0; /* initialize to suppress clang warning */
sum = ZERO;
xd = wd = idd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
wd = NV_DATA_OMP(w);
idd = NV_DATA_OMP(id);
#pragma omp parallel for default(none) private(i) shared(N,xd,wd,idd) \
reduction(+:sum) schedule(static) num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++) {
if (idd[i] > ZERO) {
sum += SUNSQR(xd[i]*wd[i]);
}
}
return(sum);
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
int N_VLinearCombination_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector z)
{
int i;
sunindextype j, N;
realtype* zd=NULL;
realtype* xd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VScale */
if (nvec == 1) {
N_VScale_OpenMP(c[0], X[0], z);
return(0);
}
/* should have called N_VLinearSum */
if (nvec == 2) {
N_VLinearSum_OpenMP(c[0], X[0], c[1], X[1], z);
return(0);
}
/* get vector length and data array */
N = NV_LENGTH_OMP(z);
zd = NV_DATA_OMP(z);
/*
* X[0] += c[i]*X[i], i = 1,...,nvec-1
*/
if ((X[0] == z) && (c[0] == ONE)) {
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
num_threads(NV_NUM_THREADS_OMP(z))
{
for (i=1; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] += c[i] * xd[j];
}
}
}
return(0);
}
/*
* X[0] = c[0] * X[0] + sum{ c[i] * X[i] }, i = 1,...,nvec-1
*/
if (X[0] == z) {
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
num_threads(NV_NUM_THREADS_OMP(z))
{
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] *= c[0];
}
for (i=1; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] += c[i] * xd[j];
}
}
}
return(0);
}
/*
* z = sum{ c[i] * X[i] }, i = 0,...,nvec-1
*/
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c,zd) \
num_threads(NV_NUM_THREADS_OMP(z))
{
xd = NV_DATA_OMP(X[0]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] = c[0] * xd[j];
}
for (i=1; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] += c[i] * xd[j];
}
}
}
return(0);
}
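/* Note: inside the parallel region the i-loop over vectors runs redundantly
 * in every thread; only the inner j-loops are worksharing constructs, so the
 * same thread team is reused for each vector without re-forking. */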
int N_VScaleAddMulti_OpenMP(int nvec, realtype* a, N_Vector x, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VLinearSum */
if (nvec == 1) {
N_VLinearSum_OpenMP(a[0], x, ONE, Y[0], Z[0]);
return(0);
}
/* get vector length and data array */
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
/*
* Y[i][j] += a[i] * x[j]
*/
if (Y == Z) {
#pragma omp parallel default(none) private(i,j,yd) shared(nvec,Y,N,a,xd) \
num_threads(NV_NUM_THREADS_OMP(x))
{
for (i=0; i<nvec; i++) {
yd = NV_DATA_OMP(Y[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
yd[j] += a[i] * xd[j];
}
}
}
return(0);
}
/*
* Z[i][j] = Y[i][j] + a[i] * x[j]
*/
#pragma omp parallel default(none) private(i,j,yd,zd) shared(nvec,Y,Z,N,a,xd) \
num_threads(NV_NUM_THREADS_OMP(x))
{
for (i=0; i<nvec; i++) {
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] = a[i] * xd[j] + yd[j];
}
}
}
return(0);
}
int N_VDotProdMulti_OpenMP(int nvec, N_Vector x, N_Vector* Y, realtype* dotprods)
{
int i;
sunindextype j, N;
realtype sum;
realtype* xd=NULL;
realtype* yd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VDotProd */
if (nvec == 1) {
dotprods[0] = N_VDotProd_OpenMP(x, Y[0]);
return(0);
}
/* get vector length and data array */
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
/* initialize dot products */
for (i=0; i<nvec; i++) {
dotprods[i] = ZERO;
}
/* compute multiple dot products */
#pragma omp parallel default(none) private(i,j,yd,sum) shared(nvec,Y,N,xd,dotprods) \
num_threads(NV_NUM_THREADS_OMP(x))
{
for (i=0; i<nvec; i++) {
yd = NV_DATA_OMP(Y[i]);
sum = ZERO;
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
sum += xd[j] * yd[j];
}
#pragma omp critical
{
dotprods[i] += sum;
}
}
}
return(0);
}
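/* Illustrative note (not part of this file): the fused kernels above are
 * attached only on request; a typical pattern, assuming the standard SUNDIALS
 * enable routine for this module, is
 *
 *   N_Vector v = N_VNew_OpenMP(n, nthreads);
 *   N_VEnableFusedOps_OpenMP(v, SUNTRUE);
 *
 * after which N_VLinearCombination and friends are dispatched via v->ops. */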
/*
* -----------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------
*/
int N_VLinearSumVectorArray_OpenMP(int nvec,
realtype a, N_Vector* X,
realtype b, N_Vector* Y,
N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
realtype c;
N_Vector* V1;
N_Vector* V2;
booleantype test;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VLinearSum */
if (nvec == 1) {
N_VLinearSum_OpenMP(a, X[0], b, Y[0], Z[0]);
return(0);
}
/* BLAS usage: axpy y <- ax+y */
if ((b == ONE) && (Z == Y))
return(VaxpyVectorArray_OpenMP(nvec, a, X, Y));
/* BLAS usage: axpy x <- by+x */
if ((a == ONE) && (Z == X))
return(VaxpyVectorArray_OpenMP(nvec, b, Y, X));
/* Case: a == b == 1.0 */
if ((a == ONE) && (b == ONE))
return(VSumVectorArray_OpenMP(nvec, X, Y, Z));
/* Cases: */
/* (1) a == 1.0, b = -1.0, */
/* (2) a == -1.0, b == 1.0 */
if ((test = ((a == ONE) && (b == -ONE))) || ((a == -ONE) && (b == ONE))) {
V1 = test ? Y : X;
V2 = test ? X : Y;
return(VDiffVectorArray_OpenMP(nvec, V2, V1, Z));
}
/* Cases: */
/* (1) a == 1.0, b == other or 0.0, */
/* (2) a == other or 0.0, b == 1.0 */
/* if a or b is 0.0, then user should have called N_VScale */
if ((test = (a == ONE)) || (b == ONE)) {
c = test ? b : a;
V1 = test ? Y : X;
V2 = test ? X : Y;
return(VLin1VectorArray_OpenMP(nvec, c, V1, V2, Z));
}
/* Cases: */
/* (1) a == -1.0, b != 1.0, */
/* (2) a != 1.0, b == -1.0 */
if ((test = (a == -ONE)) || (b == -ONE)) {
c = test ? b : a;
V1 = test ? Y : X;
V2 = test ? X : Y;
return(VLin2VectorArray_OpenMP(nvec, c, V1, V2, Z));
}
/* Case: a == b */
/* catches case both a and b are 0.0 - user should have called N_VConst */
if (a == b)
return(VScaleSumVectorArray_OpenMP(nvec, a, X, Y, Z));
/* Case: a == -b */
if (a == -b)
return(VScaleDiffVectorArray_OpenMP(nvec, a, X, Y, Z));
/* Do all cases not handled above: */
/* (1) a == other, b == 0.0 - user should have called N_VScale */
/* (2) a == 0.0, b == other - user should have called N_VScale */
/* (3) a,b == other, a !=b, a != -b */
/* get vector length */
N = NV_LENGTH_OMP(Z[0]);
/* compute linear sum for each vector pair in vector arrays */
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,a,b) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] = a * xd[j] + b * yd[j];
}
}
}
return(0);
}
int N_VScaleVectorArray_OpenMP(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VScale */
if (nvec == 1) {
N_VScale_OpenMP(c[0], X[0], Z[0]);
return(0);
}
/* get vector length */
N = NV_LENGTH_OMP(Z[0]);
/*
* X[i] *= c[i]
*/
if (X == Z) {
#pragma omp parallel default(none) private(i,j,xd) shared(nvec,X,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
xd[j] *= c[i];
}
}
}
return(0);
}
/*
* Z[i] = c[i] * X[i]
*/
#pragma omp parallel default(none) private(i,j,xd,zd) shared(nvec,X,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] = c[i] * xd[j];
}
}
}
return(0);
}
int N_VConstVectorArray_OpenMP(int nvec, realtype c, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VConst */
if (nvec == 1) {
N_VConst_OpenMP(c, Z[0]);
return(0);
}
/* get vector length */
N = NV_LENGTH_OMP(Z[0]);
/* set each vector in the vector array to a constant */
#pragma omp parallel default(none) private(i,j,zd) shared(nvec,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (i=0; i<nvec; i++) {
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
zd[j] = c;
}
}
}
return(0);
}
int N_VWrmsNormVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* W, realtype* nrm)
{
int i;
sunindextype j, N;
realtype sum;
realtype* wd=NULL;
realtype* xd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VWrmsNorm */
if (nvec == 1) {
nrm[0] = N_VWrmsNorm_OpenMP(X[0], W[0]);
return(0);
}
/* get vector length */
N = NV_LENGTH_OMP(X[0]);
/* initialize norms */
for (i=0; i<nvec; i++) {
nrm[i] = ZERO;
}
/* compute the WRMS norm for each vector in the vector array */
#pragma omp parallel default(none) private(i,j,xd,wd,sum) shared(nvec,X,W,N,nrm) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
wd = NV_DATA_OMP(W[i]);
sum = ZERO;
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
sum += SUNSQR(xd[j] * wd[j]);
}
#pragma omp critical
{
nrm[i] += sum;
}
}
}
for (i=0; i<nvec; i++) {
nrm[i] = SUNRsqrt(nrm[i]/N);
}
return(0);
}
int N_VWrmsNormMaskVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* W,
N_Vector id, realtype* nrm)
{
int i;
sunindextype j, N;
realtype sum;
realtype* wd=NULL;
realtype* xd=NULL;
realtype* idd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
/* should have called N_VWrmsNorm */
if (nvec == 1) {
nrm[0] = N_VWrmsNormMask_OpenMP(X[0], W[0], id);
return(0);
}
/* get vector length and mask data array */
N = NV_LENGTH_OMP(X[0]);
idd = NV_DATA_OMP(id);
/* initialize norms */
for (i=0; i<nvec; i++) {
nrm[i] = ZERO;
}
/* compute the WRMS norm for each vector in the vector array */
#pragma omp parallel default(none) private(i,j,xd,wd,sum) shared(nvec,X,W,N,idd,nrm) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
wd = NV_DATA_OMP(W[i]);
sum = ZERO;
#pragma omp for schedule(static)
for (j=0; j<N; j++) {
if (idd[j] > ZERO)
sum += SUNSQR(xd[j] * wd[j]);
}
#pragma omp critical
{
nrm[i] += sum;
}
}
}
for (i=0; i<nvec; i++) {
nrm[i] = SUNRsqrt(nrm[i]/N);
}
return(0);
}
int N_VScaleAddMultiVectorArray_OpenMP(int nvec, int nsum, realtype* a,
N_Vector* X, N_Vector** Y, N_Vector** Z)
{
int i, j;
sunindextype k, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
int retval;
N_Vector* YY;
N_Vector* ZZ;
i = 0; /* initialize to suppress clang warning */
k = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
if (nsum < 1) return(-1);
/* ---------------------------
* Special cases for nvec == 1
* --------------------------- */
if (nvec == 1) {
/* should have called N_VLinearSum */
if (nsum == 1) {
N_VLinearSum_OpenMP(a[0], X[0], ONE, Y[0][0], Z[0][0]);
return(0);
}
/* should have called N_VScaleAddMulti */
YY = (N_Vector*) malloc(nsum * sizeof(N_Vector));
ZZ = (N_Vector*) malloc(nsum * sizeof(N_Vector));
for (j=0; j<nsum; j++) {
YY[j] = Y[j][0];
ZZ[j] = Z[j][0];
}
retval = N_VScaleAddMulti_OpenMP(nsum, a, X[0], YY, ZZ);
free(YY);
free(ZZ);
return(retval);
}
/* --------------------------
* Special cases for nvec > 1
* -------------------------- */
/* should have called N_VLinearSumVectorArray */
if (nsum == 1) {
retval = N_VLinearSumVectorArray_OpenMP(nvec, a[0], X, ONE, Y[0], Z[0]);
return(retval);
}
/* ----------------------------
* Compute multiple linear sums
* ---------------------------- */
/* get vector length */
N = NV_LENGTH_OMP(X[0]);
/*
* Y[i][j] += a[i] * x[j]
*/
if (Y == Z) {
#pragma omp parallel default(none) private(i,j,k,xd,yd) shared(nvec,nsum,X,Y,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
for (j=0; j<nsum; j++) {
yd = NV_DATA_OMP(Y[j][i]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
yd[k] += a[j] * xd[k];
}
}
}
}
return(0);
}
/*
* Z[i][j] = Y[i][j] + a[i] * x[j]
*/
#pragma omp parallel default(none) private(i,j,k,xd,yd,zd) shared(nvec,nsum,X,Y,Z,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
for (j=0; j<nsum; j++) {
yd = NV_DATA_OMP(Y[j][i]);
zd = NV_DATA_OMP(Z[j][i]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
zd[k] = a[j] * xd[k] + yd[k];
}
}
}
}
return(0);
}
int N_VLinearCombinationVectorArray_OpenMP(int nvec, int nsum,
realtype* c,
N_Vector** X,
N_Vector* Z)
{
int i; /* vector arrays index in summation [0,nsum) */
int j; /* vector index in vector array [0,nvec) */
sunindextype k; /* element index in vector [0,N) */
sunindextype N;
realtype* zd=NULL;
realtype* xd=NULL;
realtype* ctmp;
N_Vector* Y;
i = 0; /* initialize to suppress clang warning */
k = 0;
/* invalid number of vectors */
if (nvec < 1) return(-1);
if (nsum < 1) return(-1);
/* ---------------------------
* Special cases for nvec == 1
* --------------------------- */
if (nvec == 1) {
/* should have called N_VScale */
if (nsum == 1) {
N_VScale_OpenMP(c[0], X[0][0], Z[0]);
return(0);
}
/* should have called N_VLinearSum */
if (nsum == 2) {
N_VLinearSum_OpenMP(c[0], X[0][0], c[1], X[1][0], Z[0]);
return(0);
}
/* should have called N_VLinearCombination */
Y = (N_Vector*) malloc(nsum * sizeof(N_Vector));
for (i=0; i<nsum; i++) {
Y[i] = X[i][0];
}
N_VLinearCombination_OpenMP(nsum, c, Y, Z[0]);
free(Y);
return(0);
}
/* --------------------------
* Special cases for nvec > 1
* -------------------------- */
/* should have called N_VScaleVectorArray */
if (nsum == 1) {
ctmp = (realtype*) malloc(nvec * sizeof(realtype));
for (j=0; j<nvec; j++) {
ctmp[j] = c[0];
}
N_VScaleVectorArray_OpenMP(nvec, ctmp, X[0], Z);
free(ctmp);
return(0);
}
/* should have called N_VLinearSumVectorArray */
if (nsum == 2) {
N_VLinearSumVectorArray_OpenMP(nvec, c[0], X[0], c[1], X[1], Z);
return(0);
}
/* --------------------------
* Compute linear combination
* -------------------------- */
/* get vector length */
N = NV_LENGTH_OMP(Z[0]);
/*
* X[0][j] += c[i]*X[i][j], i = 1,...,nsum-1
*/
if ((X[0] == Z) && (c[0] == ONE)) {
#pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (j=0; j<nvec; j++) {
zd = NV_DATA_OMP(Z[j]);
for (i=1; i<nsum; i++) {
xd = NV_DATA_OMP(X[i][j]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
zd[k] += c[i] * xd[k];
}
}
}
}
return(0);
}
/*
* X[0][j] = c[0] * X[0][j] + sum{ c[i] * X[i][j] }, i = 1,...,nsum-1
*/
if (X[0] == Z) {
#pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (j=0; j<nvec; j++) {
zd = NV_DATA_OMP(Z[j]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
zd[k] *= c[0];
}
for (i=1; i<nsum; i++) {
xd = NV_DATA_OMP(X[i][j]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
zd[k] += c[i] * xd[k];
}
}
}
}
return(0);
}
/*
* Z[j] = sum{ c[i] * X[i][j] }, i = 0,...,nsum-1
*/
#pragma omp parallel default(none) private(i,j,k,xd,zd) shared(nvec,nsum,X,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(Z[0]))
{
for (j=0; j<nvec; j++) {
/* scale first vector in the sum into the output vector */
xd = NV_DATA_OMP(X[0][j]);
zd = NV_DATA_OMP(Z[j]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
zd[k] = c[0] * xd[k];
}
/* scale and sum remaining vectors into the output vector */
for (i=1; i<nsum; i++) {
xd = NV_DATA_OMP(X[i][j]);
#pragma omp for schedule(static)
for (k=0; k<N; k++) {
zd[k] += c[i] * xd[k];
}
}
}
}
return(0);
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector operations
* -----------------------------------------------------------------
*/
/* ----------------------------------------------------------------------------
* Copy vector components into a second vector
*/
static void VCopy_OpenMP(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = xd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum
*/
static void VSum_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = xd[i]+yd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference
*/
static void VDiff_OpenMP(N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = xd[i]-yd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute the negative of a vector
*/
static void VNeg_OpenMP(N_Vector x, N_Vector z)
{
sunindextype i, N;
realtype *xd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,xd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = -xd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector sum
*/
static void VScaleSum_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = c*(xd[i]+yd[i]);
return;
}
/* ----------------------------------------------------------------------------
* Compute scaled vector difference
*/
static void VScaleDiff_OpenMP(realtype c, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,c,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = c*(xd[i]-yd[i]);
return;
}
/* ----------------------------------------------------------------------------
* Compute vector sum z[i] = a*x[i]+y[i]
*/
static void VLin1_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,a,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = (a*xd[i])+yd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute vector difference z[i] = a*x[i]-y[i]
*/
static void VLin2_OpenMP(realtype a, N_Vector x, N_Vector y, N_Vector z)
{
sunindextype i, N;
realtype *xd, *yd, *zd;
i = 0; /* initialize to suppress clang warning */
xd = yd = zd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
zd = NV_DATA_OMP(z);
#pragma omp parallel for default(none) private(i) shared(N,a,xd,yd,zd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
zd[i] = (a*xd[i])-yd[i];
return;
}
/* ----------------------------------------------------------------------------
* Compute special cases of linear sum
*/
static void Vaxpy_OpenMP(realtype a, N_Vector x, N_Vector y)
{
sunindextype i, N;
realtype *xd, *yd;
i = 0; /* initialize to suppress clang warning */
xd = yd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
yd = NV_DATA_OMP(y);
if (a == ONE) {
#pragma omp parallel for default(none) private(i) shared(N,xd,yd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
yd[i] += xd[i];
return;
}
if (a == -ONE) {
#pragma omp parallel for default(none) private(i) shared(N,xd,yd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
yd[i] -= xd[i];
return;
}
#pragma omp parallel for default(none) private(i) shared(N,a,xd,yd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
yd[i] += a*xd[i];
return;
}
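/* Note: the a == ONE and a == -ONE fast paths above avoid a multiply in the
   common axpy cases y <- y + x and y <- y - x. */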
/* ----------------------------------------------------------------------------
* Compute scaled vector x[i] = a*x[i]
*/
static void VScaleBy_OpenMP(realtype a, N_Vector x)
{
sunindextype i, N;
realtype *xd;
i = 0; /* initialize to suppress clang warning */
xd = NULL;
N = NV_LENGTH_OMP(x);
xd = NV_DATA_OMP(x);
#pragma omp parallel for default(none) private(i) shared(N,a,xd) schedule(static) \
num_threads(NV_NUM_THREADS_OMP(x))
for (i = 0; i < N; i++)
xd[i] *= a;
return;
}
/*
* -----------------------------------------------------------------
* private functions for special cases of vector array operations
* -----------------------------------------------------------------
*/
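/* Each routine below opens a single parallel region and loops over the nvec
   vectors inside it, with an "omp for" worksharing loop per vector, so the
   thread team is created once per call rather than once per vector. */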
static int VSumVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
zd[j] = xd[j] + yd[j];
}
}
return(0);
}
static int VDiffVectorArray_OpenMP(int nvec, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
zd[j] = xd[j] - yd[j];
}
}
return(0);
}
static int VScaleSumVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
zd[j] = c * (xd[j] + yd[j]);
}
}
return(0);
}
static int VScaleDiffVectorArray_OpenMP(int nvec, realtype c, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,c) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
zd[j] = c * (xd[j] - yd[j]);
}
}
return(0);
}
static int VLin1VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
zd[j] = (a * xd[j]) + yd[j];
}
}
return(0);
}
static int VLin2VectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y, N_Vector* Z)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
realtype* zd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
#pragma omp parallel default(none) private(i,j,xd,yd,zd) shared(nvec,X,Y,Z,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
zd = NV_DATA_OMP(Z[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
zd[j] = (a * xd[j]) - yd[j];
}
}
return(0);
}
static int VaxpyVectorArray_OpenMP(int nvec, realtype a, N_Vector* X, N_Vector* Y)
{
int i;
sunindextype j, N;
realtype* xd=NULL;
realtype* yd=NULL;
i = 0; /* initialize to suppress clang warning */
j = 0;
N = NV_LENGTH_OMP(X[0]);
if (a == ONE) {
#pragma omp parallel default(none) private(i,j,xd,yd) shared(nvec,X,Y,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
yd[j] += xd[j];
}
}
return(0);
}
if (a == -ONE) {
#pragma omp parallel default(none) private(i,j,xd,yd) shared(nvec,X,Y,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
yd[j] -= xd[j];
}
}
return(0);
}
#pragma omp parallel default(none) private(i,j,xd,yd) shared(nvec,X,Y,N,a) \
num_threads(NV_NUM_THREADS_OMP(X[0]))
{
for (i=0; i<nvec; i++) {
xd = NV_DATA_OMP(X[i]);
yd = NV_DATA_OMP(Y[i]);
#pragma omp for schedule(static)
for (j=0; j<N; j++)
yd[j] += a * xd[j];
}
}
return(0);
}
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
int N_VEnableFusedOps_OpenMP(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
if (tf) {
/* enable all fused vector operations */
v->ops->nvlinearcombination = N_VLinearCombination_OpenMP;
v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMP;
v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMP;
/* enable all vector array operations */
v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMP;
v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMP;
v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMP;
v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMP;
v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMP;
v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMP;
v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMP;
} else {
/* disable all fused vector operations */
v->ops->nvlinearcombination = NULL;
v->ops->nvscaleaddmulti = NULL;
v->ops->nvdotprodmulti = NULL;
/* disable all vector array operations */
v->ops->nvlinearsumvectorarray = NULL;
v->ops->nvscalevectorarray = NULL;
v->ops->nvconstvectorarray = NULL;
v->ops->nvwrmsnormvectorarray = NULL;
v->ops->nvwrmsnormmaskvectorarray = NULL;
v->ops->nvscaleaddmultivectorarray = NULL;
v->ops->nvlinearcombinationvectorarray = NULL;
}
/* return success */
return(0);
}
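/* Usage sketch (illustrative, not part of the original file): enable all
   fused and vector-array operations on a freshly created OpenMP vector
   before handing it to an integrator.

     N_Vector y = N_VNew_OpenMP(length, num_threads);
     if (N_VEnableFusedOps_OpenMP(y, SUNTRUE) != 0) { (handle error) }
*/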
int N_VEnableLinearCombination_OpenMP(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvlinearcombination = N_VLinearCombination_OpenMP;
else
v->ops->nvlinearcombination = NULL;
/* return success */
return(0);
}
int N_VEnableScaleAddMulti_OpenMP(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvscaleaddmulti = N_VScaleAddMulti_OpenMP;
else
v->ops->nvscaleaddmulti = NULL;
/* return success */
return(0);
}
int N_VEnableDotProdMulti_OpenMP(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvdotprodmulti = N_VDotProdMulti_OpenMP;
else
v->ops->nvdotprodmulti = NULL;
/* return success */
return(0);
}
int N_VEnableLinearSumVectorArray_OpenMP(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_OpenMP;
else
v->ops->nvlinearsumvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableScaleVectorArray_OpenMP(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvscalevectorarray = N_VScaleVectorArray_OpenMP;
else
v->ops->nvscalevectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableConstVectorArray_OpenMP(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvconstvectorarray = N_VConstVectorArray_OpenMP;
else
v->ops->nvconstvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableWrmsNormVectorArray_OpenMP(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_OpenMP;
else
v->ops->nvwrmsnormvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableWrmsNormMaskVectorArray_OpenMP(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_OpenMP;
else
v->ops->nvwrmsnormmaskvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableScaleAddMultiVectorArray_OpenMP(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_OpenMP;
else
v->ops->nvscaleaddmultivectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableLinearCombinationVectorArray_OpenMP(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_OpenMP;
else
v->ops->nvlinearcombinationvectorarray = NULL;
/* return success */
return(0);
}
|
pyparallel_menu.c | #include "pyparallel_menu.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <omp.h>
#define PI 3.14159265358979323846
int *PSF(int *counts,int size,double *x_pos,double *y_pos, double *psf_ratio, double *psf_sigmal,double *psf_sigmah,int nr,int nc, int test, int threads){
int i,k,ssum,dim=nr*nc,N;
int ii,jj,electron_index,electron_counter,xpos,ypos;
double R,theta;
unsigned seed;
int *pixel_array = malloc(dim*sizeof(int));
if (pixel_array == NULL) return NULL;
ssum = 0;
#pragma omp parallel num_threads(threads)\
shared(counts)\
private(k)\
firstprivate(size)\
reduction(+:ssum)
{
int myid = omp_get_thread_num(),thread_number = omp_get_num_threads();
int istart = (myid*size/thread_number) , iend = ((myid+1)*size/thread_number);
if (myid == thread_number -1) {iend = size;}
for (k=istart;k<iend;k++){
ssum += counts[k];
}
}
double *A = malloc((2*ssum)*sizeof(double));
if (A == NULL) { free(pixel_array); return NULL; }
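/* Fill A with 2*ssum standard-normal deviates via the Box-Muller transform:
   A[0..ssum) holds R*cos(theta), A[ssum..2*ssum) holds R*sin(theta), and
   each thread draws from its own rand_r() stream (distinct seed). */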
#pragma omp parallel num_threads(threads)\
shared(A)\
private(i,seed,R,theta)\
firstprivate(ssum)
{
int myid = omp_get_thread_num(),thread_number = omp_get_num_threads();
int istart = myid*ssum/thread_number , iend = (myid+1)*ssum/thread_number;
if (myid == thread_number -1) {iend = ssum;}
seed = 25234 + 17*myid + test;
for (i=istart;i<iend;i++) {
theta = 2.*PI*rand_r(&seed)/((double)RAND_MAX);
/* shift the uniform draw into (0,1] so log() never sees zero */
R = sqrt(-2.*log((rand_r(&seed)+1.0)/((double)RAND_MAX+1.0)));
A[i] = R*cos(theta);
A[i+ssum] = R*sin(theta);
}
}
/*--------- filling pixel_array with zeros--------------*/
#pragma omp parallel num_threads(threads)\
shared(pixel_array)\
private(i)\
firstprivate(dim)
{
int myid = omp_get_thread_num(),thread_number = omp_get_num_threads();
int istart = myid*dim/thread_number , iend = (myid+1)*dim/thread_number;
if (myid == thread_number -1) {iend = dim;}
for (i=istart;i<iend;i++) {
pixel_array[i] = 0;
}
}
/*--------- pixel_array filled with zeros--------------*/
electron_counter = 0;
for (ii=0;ii<size;ii++){
N = counts[ii]*psf_ratio[ii] ;
for (jj=0;jj<N;jj++){
xpos = A[electron_counter]*psf_sigmah[ii] + x_pos[ii];
ypos = A[electron_counter+ssum]*psf_sigmah[ii] + y_pos[ii];
if ( ypos>0 && ypos<nr && xpos>0 && xpos<nc ) { /* row (ypos) < nr, column (xpos) < nc, matching the row-major index below */
electron_index = ypos*nc + xpos;
pixel_array[electron_index] = pixel_array[electron_index] + 1;
}
electron_counter = electron_counter + 1;
}
for (jj=0;jj<(counts[ii]-N);jj++) {
xpos = A[electron_counter]*psf_sigmal[ii] + x_pos[ii];
ypos = A[electron_counter+ssum]*psf_sigmal[ii] + y_pos[ii];
if ( ypos>0 && ypos<nr && xpos>0 && xpos<nc ) { /* same bound check as above */
electron_index = ypos*nc + xpos;
pixel_array[electron_index] = pixel_array[electron_index] + 1;
}
electron_counter = electron_counter + 1;
}
}
free(A);
return pixel_array;
}
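/* Usage sketch (hypothetical values, not from the original source):
     int counts[2] = {1000, 500};
     double x[2] = {10., 50.}, y[2] = {20., 60.};
     double ratio[2] = {0.8, 0.8}, sl[2] = {1.5, 1.5}, sh[2] = {4.0, 4.0};
     int *img = PSF(counts, 2, x, y, ratio, sl, sh, 128, 128, 0, 4);
   img then holds a 128x128 (nr x nc) electron-count image; the caller owns
   it and must free(img). */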
|
pyfr_gemm_cm.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <libxsmm.h>
#include <stdlib.h>
#include <assert.h>
#include <stdio.h>
#include <math.h> /* fabs() in the result check below */
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
# include <mkl.h>
#else /* fallback triple-loop GEMM */
void my_dgemm( const int* M, const int* N, const int* K, const double* alpha,
const double* a, const int* LDA, const double* b, const int* LDB,
const double* beta, double* c, const int* LDC ) {
const int my_M = *M;
const int my_N = *N;
const int my_K = *K;
const int my_LDA = *LDA;
const int my_LDB = *LDB;
const int my_LDC = *LDC;
const double my_alpha = *alpha;
const double my_beta = *beta;
int m = 0, n = 0, k = 0;
for ( n = 0; n < my_N; ++n ) {
for ( m = 0; m < my_M; ++m ) {
c[(n * my_LDC) + m] = my_beta * c[(n * my_LDC) + m];
for ( k = 0; k < my_K; ++k ) {
c[(n * my_LDC) + m] += my_alpha * a[(k * my_LDA) + m] * b[(n * my_LDB) + k];
}
}
}
}
#endif
int main(int argc, char *argv[])
{
int n,m,k;
int lda,ldb,ldc;
double* a;
double* b;
double* c1;
double* c2;
libxsmm_timer_tickint l_start, l_end;
double l_total = 0.0;
int reps, i, j;
const int nblock = 16;
double alpha = 1.0, beta = 1.0;
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
char transa = 'N', transb = 'N';
#endif
int l_prefetch_op = LIBXSMM_PREFETCH_NONE;
libxsmm_dmmfunction kernel = NULL;
if (argc != 5) {
assert(0 < argc);
fprintf(stderr, "Invalid: try %s M N K reps\n", argv[0]);
exit(-1);
}
m = atoi(argv[1]);
n = atoi(argv[2]);
k = atoi(argv[3]);
reps = atoi(argv[4]);
/* column-major layout is what you want to use for the sizes in question */
lda = m;
ldb = k;
ldc = m;
if (n % nblock != 0) {
fprintf(stderr, "N needs to be divisible by %i\n", nblock);
exit(-1);
}
a = (double*)libxsmm_aligned_malloc(sizeof(double)*lda*k, 64);
b = (double*)libxsmm_aligned_malloc(sizeof(double)*ldb*n, 64);
c1 = (double*)libxsmm_aligned_malloc(sizeof(double)*ldc*n, 64);
c2 = (double*)libxsmm_aligned_malloc(sizeof(double)*ldc*n, 64);
#pragma omp parallel for
for (i = 0; i < lda*k; i++) {
a[i] = libxsmm_rng_f64();
}
#pragma omp parallel for
for (i = 0; i < ldb*n; i++) {
b[i] = libxsmm_rng_f64();
}
#pragma omp parallel for
for (i = 0; i < ldc*n; i++) {
c1[i] = 0;
c2[i] = 0;
}
/* JIT Kernel */
kernel = libxsmm_dmmdispatch(m, nblock, k, NULL, NULL, NULL, NULL, NULL, NULL, &l_prefetch_op );
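/* The JIT-dispatched kernel computes one m x nblock panel update c += a*b
   for the fixed k; the timed loop below applies it to the n/nblock
   independent column panels of b and c2 in parallel. */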
/* init MKL */
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
dgemm(&transa, &transb, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
#else
my_dgemm(&m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
#endif
#pragma omp parallel for
for (i = 0; i < ldc*n; i++) {
c1[i] = 0;
c2[i] = 0;
}
l_start = libxsmm_timer_tick();
for ( j = 0; j < reps; j++ ) {
#if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL)
dgemm(&transa, &transb, &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
#else
my_dgemm(&m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c1, &ldc);
#endif
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
fprintf(stdout, "time[s] MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps );
fprintf(stdout, "GFLOPS MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total );
fprintf(stdout, "GB/s MKL (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total );
l_start = libxsmm_timer_tick();
for ( j = 0; j < reps; j++ ) {
#pragma omp parallel for private(i)
for ( i = 0; i < n; i+=nblock) {
kernel( a, &b[ldb*i], &c2[ldc*i], NULL, NULL, NULL );
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
fprintf(stdout, "time[s] libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps );
fprintf(stdout, "GFLOPS libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total );
fprintf(stdout, "GB/s libxsmm (CM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total );
/* test result */
double max_error = 0.0;
for ( i = 0; i < ldc*n; i++) {
if (max_error < fabs(c1[i] - c2[i])) {
max_error = fabs(c1[i] - c2[i]);
}
}
printf("max error: %f\n\n", max_error);
return EXIT_SUCCESS;
}
|
GB_binop__minus_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__minus_int32
// A.*B function (eWiseMult): GB_AemultB__minus_int32
// A*D function (colscale): GB_AxD__minus_int32
// D*A function (rowscale): GB_DxB__minus_int32
// C+=B function (dense accum): GB_Cdense_accumB__minus_int32
// C+=b function (dense accum): GB_Cdense_accumb__minus_int32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__minus_int32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__minus_int32
// C=scalar+B GB_bind1st__minus_int32
// C=scalar+B' GB_bind1st_tran__minus_int32
// C=A+scalar GB_bind2nd__minus_int32
// C=A'+scalar GB_bind2nd_tran__minus_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x - y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_INT32 || GxB_NO_MINUS_INT32)
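// Example: building with -DGxB_NO_MINUS_INT32 (see GB_control.h) makes
// GB_DISABLE true, so every worker below returns GrB_NO_VALUE and GraphBLAS
// falls back to the generic (non-specialized) implementation.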
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__minus_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__minus_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__minus_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__minus_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__minus_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__minus_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__minus_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__minus_int32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__minus_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t bij = Bx [p] ;
Cx [p] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__minus_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
Cx [p] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x - aij) ; \
}
GrB_Info GB_bind1st_tran__minus_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij - y) ; \
}
GrB_Info GB_bind2nd_tran__minus_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ParticleContainer.h | /**
* @file ParticleContainer.h
*
* @date 17 Jan 2018
* @author tchipevn
*/
#pragma once
#include <array>
#include "autopas/containers/ParticleContainerInterface.h"
#include "autopas/containers/TraversalInterface.h"
#ifdef AUTOPAS_OPENMP
#include <omp.h>
#endif
namespace autopas {
// consider multiple inheritance or delegation to avoid virtual call to Functor
/**
* The ParticleContainer class stores particles in some object and provides
* methods to iterate over its particles.
* @tparam ParticleCell Class for the particle cells
*/
template <class ParticleCell, class SoAArraysType = typename ParticleCell::ParticleType::SoAArraysType>
class ParticleContainer : public ParticleContainerInterface<ParticleCell> {
public:
/**
* Constructor of ParticleContainer
* @param boxMin
* @param boxMax
* @param cutoff
* @param skin
*/
ParticleContainer(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, const double cutoff,
const double skin)
: _cells(), _boxMin(boxMin), _boxMax(boxMax), _cutoff(cutoff), _skin(skin) {}
/**
* Destructor of ParticleContainer.
*/
~ParticleContainer() override = default;
/**
* Delete the copy constructor to prevent unwanted copies.
* No particle container should ever be copied.
* @param obj
*/
ParticleContainer(const ParticleContainer &obj) = delete;
/**
* Delete the copy assignment operator to prevent unwanted copies
* No particle container should ever be copied.
* @param other
* @return
*/
ParticleContainer &operator=(const ParticleContainer &other) = delete;
/**
* @copydoc autopas::ParticleContainerInterface::getBoxMax()
*/
const std::array<double, 3> &getBoxMax() const override final { return _boxMax; }
/**
* @copydoc autopas::ParticleContainerInterface::setBoxMax()
*/
void setBoxMax(const std::array<double, 3> &boxMax) override final { _boxMax = boxMax; }
/**
* @copydoc autopas::ParticleContainerInterface::getBoxMin()
*/
const std::array<double, 3> &getBoxMin() const override final { return _boxMin; }
/**
* @copydoc autopas::ParticleContainerInterface::setBoxMin()
*/
void setBoxMin(const std::array<double, 3> &boxMin) override final { _boxMin = boxMin; }
/**
* @copydoc autopas::ParticleContainerInterface::getCutoff()
*/
double getCutoff() const override final { return _cutoff; }
/**
* @copydoc autopas::ParticleContainerInterface::setCutoff()
*/
void setCutoff(double cutoff) override final { _cutoff = cutoff; }
/**
* @copydoc autopas::ParticleContainerInterface::getSkin()
*/
double getSkin() const override final { return _skin; }
/**
* @copydoc autopas::ParticleContainerInterface::setSkin()
*/
void setSkin(double skin) override final { _skin = skin; }
/**
* @copydoc autopas::ParticleContainerInterface::getInteractionLength()
*/
double getInteractionLength() const override final { return _cutoff + _skin; }
/**
* Deletes all particles from the container.
*/
void deleteAllParticles() override {
#ifdef AUTOPAS_OPENMP
// @todo: find a sensible value for magic number
// numThreads should be at least 1 and maximal max_threads
int numThreads = std::max(1, std::min(omp_get_max_threads(), (int)(this->_cells.size() / 1000)));
AutoPasLog(trace, "Using {} threads", numThreads);
#pragma omp parallel for num_threads(numThreads)
#endif
for (size_t i = 0; i < this->_cells.size(); ++i) {
this->_cells[i].clear();
}
}
/**
* Get the number of particles saved in the container.
* @return Number of particles in the container.
*/
unsigned long getNumParticles() const override {
size_t numParticles = 0ul;
#ifdef AUTOPAS_OPENMP
// @todo: find a sensible value for magic number
// numThreads should be at least 1 and maximal max_threads
int numThreads = std::max(1, std::min(omp_get_max_threads(), (int)(this->_cells.size() / 1000)));
AutoPasLog(trace, "Using {} threads", numThreads);
#pragma omp parallel for num_threads(numThreads) reduction(+ : numParticles)
#endif
for (size_t index = 0; index < _cells.size(); ++index) {
numParticles += _cells[index].numParticles();
}
return numParticles;
}
protected:
/**
* Vector of particle cells.
* All particle containers store their particles in ParticleCells. This is the
* common vector for this purpose.
*/
std::vector<ParticleCell> _cells;
private:
std::array<double, 3> _boxMin;
std::array<double, 3> _boxMax;
double _cutoff;
double _skin;
};
} // namespace autopas
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/accelerate-private.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/cache.h"
#include "magick/cache-view.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/decorate.h"
#include "magick/distort.h"
#include "magick/draw.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/fx.h"
#include "magick/fx-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/layer.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/opencl-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/random_.h"
#include "magick/random-private.h"
#include "magick/resample.h"
#include "magick/resample-private.h"
#include "magick/resize.h"
#include "magick/resource_.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Define declarations.
*/
typedef enum
{
BitwiseAndAssignmentOperator = 0xd9U,
BitwiseOrAssignmentOperator,
LeftShiftAssignmentOperator,
RightShiftAssignmentOperator,
PowerAssignmentOperator,
ModuloAssignmentOperator,
PlusAssignmentOperator,
SubtractAssignmentOperator,
MultiplyAssignmentOperator,
DivideAssignmentOperator,
IncrementAssignmentOperator,
DecrementAssignmentOperator,
LeftShiftOperator,
RightShiftOperator,
LessThanEqualOperator,
GreaterThanEqualOperator,
EqualOperator,
NotEqualOperator,
LogicalAndOperator,
LogicalOrOperator,
ExponentialNotation
} FxOperator;
struct _FxInfo
{
const Image
*images;
char
*expression;
FILE
*file;
SplayTreeInfo
*colors,
*symbols;
CacheView
**view;
RandomInfo
*random_info;
ExceptionInfo
*exception;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
*/
MagickExport FxInfo *AcquireFxInfo(const Image *images,const char *expression)
{
const Image
*next;
FxInfo
*fx_info;
register ssize_t
i;
unsigned char
fx_op[2];
fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
(void) memset(fx_info,0,sizeof(*fx_info));
fx_info->exception=AcquireExceptionInfo();
fx_info->images=images;
fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
fx_info->images),sizeof(*fx_info->view));
if (fx_info->view == (CacheView **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
i=0;
next=GetFirstImageInList(fx_info->images);
for ( ; next != (Image *) NULL; next=next->next)
{
fx_info->view[i]=AcquireVirtualCacheView(next,fx_info->exception);
i++;
}
fx_info->random_info=AcquireRandomInfo();
fx_info->expression=ConstantString(expression);
fx_info->file=stderr;
/*
Convert compound to simple operators.
*/
fx_op[1]='\0';
*fx_op=(unsigned char) BitwiseAndAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"&=",(char *) fx_op);
*fx_op=(unsigned char) BitwiseOrAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"|=",(char *) fx_op);
*fx_op=(unsigned char) LeftShiftAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"<<=",(char *) fx_op);
*fx_op=(unsigned char) RightShiftAssignmentOperator;
(void) SubstituteString(&fx_info->expression,">>=",(char *) fx_op);
*fx_op=(unsigned char) PowerAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"^=",(char *) fx_op);
*fx_op=(unsigned char) ModuloAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"%=",(char *) fx_op);
*fx_op=(unsigned char) PlusAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"+=",(char *) fx_op);
*fx_op=(unsigned char) SubtractAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"-=",(char *) fx_op);
*fx_op=(unsigned char) MultiplyAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"*=",(char *) fx_op);
*fx_op=(unsigned char) DivideAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"/=",(char *) fx_op);
*fx_op=(unsigned char) IncrementAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"++",(char *) fx_op);
*fx_op=(unsigned char) DecrementAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"--",(char *) fx_op);
*fx_op=(unsigned char) LeftShiftOperator;
(void) SubstituteString(&fx_info->expression,"<<",(char *) fx_op);
*fx_op=(unsigned char) RightShiftOperator;
(void) SubstituteString(&fx_info->expression,">>",(char *) fx_op);
*fx_op=(unsigned char) LessThanEqualOperator;
(void) SubstituteString(&fx_info->expression,"<=",(char *) fx_op);
*fx_op=(unsigned char) GreaterThanEqualOperator;
(void) SubstituteString(&fx_info->expression,">=",(char *) fx_op);
*fx_op=(unsigned char) EqualOperator;
(void) SubstituteString(&fx_info->expression,"==",(char *) fx_op);
*fx_op=(unsigned char) NotEqualOperator;
(void) SubstituteString(&fx_info->expression,"!=",(char *) fx_op);
*fx_op=(unsigned char) LogicalAndOperator;
(void) SubstituteString(&fx_info->expression,"&&",(char *) fx_op);
*fx_op=(unsigned char) LogicalOrOperator;
(void) SubstituteString(&fx_info->expression,"||",(char *) fx_op);
*fx_op=(unsigned char) ExponentialNotation;
(void) SubstituteString(&fx_info->expression,"**",(char *) fx_op);
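/*
  After the substitutions above, each compound operator occupies a single
  byte in the 0xd9-0xed range (the FxOperator enum); e.g. "+=" becomes the
  one-byte token PlusAssignmentOperator (0xdf), so the parser can treat
  every operator as a single character.
*/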
/*
Force right-to-left associativity for unary negation.
*/
(void) SubstituteString(&fx_info->expression,"-","-1.0*");
(void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
(void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
(void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
(void) SubstituteString(&fx_info->expression," ",""); /* compact string */
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickExport FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
register ssize_t
i;
fx_info->exception=DestroyExceptionInfo(fx_info->exception);
fx_info->expression=DestroyString(fx_info->expression);
fx_info->symbols=DestroySplayTree(fx_info->symbols);
fx_info->colors=DestroySplayTree(fx_info->colors);
for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--)
fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
% const ChannelType channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,double *alpha,
% Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline const double *GetFxSymbolValue(FxInfo *fx_info,const char *symbol)
{
return((const double *) GetValueFromSplayTree(fx_info->symbols,symbol));
}
static inline MagickBooleanType SetFxSymbolValue(
FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
const double value)
{
double
*object;
object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
if (object != (double *) NULL)
{
*object=value;
return(MagickTrue);
}
object=(double *) AcquireQuantumMemory(1,sizeof(*object));
if (object == (double *) NULL)
{
(void) ThrowMagickException(fx_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
fx_info->images->filename);
return(MagickFalse);
}
*object=value;
return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),object));
}
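/*
  FxChannelStatistics() returns a per-channel statistic of the image (depth,
  kurtosis, maxima, mean, minima, skewness, or standard_deviation), scaled
  by QuantumScale and cached in fx_info->symbols under a key that encodes
  the image address, the channel, and the symbol name.
*/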
static double FxChannelStatistics(FxInfo *fx_info,const Image *image,
ChannelType channel,const char *symbol,ExceptionInfo *exception)
{
char
channel_symbol[MaxTextExtent],
key[MaxTextExtent];
const double
*value;
double
statistic;
register const char
*p;
for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
*channel_symbol='\0';
if (*p == '.')
{
ssize_t
option;
(void) CopyMagickString(channel_symbol,p+1,MaxTextExtent);
option=ParseCommandOption(MagickChannelOptions,MagickTrue,channel_symbol);
if (option >= 0)
channel=(ChannelType) option;
}
(void) FormatLocaleString(key,MaxTextExtent,"%p.%.20g.%s",(void *) image,
(double) channel,symbol);
value=GetFxSymbolValue(fx_info,key);
if (value != (const double *) NULL)
return(QuantumScale*(*value));
statistic=0.0;
if (LocaleNCompare(symbol,"depth",5) == 0)
{
size_t
depth;
depth=GetImageChannelDepth(image,channel,exception);
statistic=(double) depth;
}
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
{
double
kurtosis,
skewness;
(void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
exception);
statistic=kurtosis;
}
if (LocaleNCompare(symbol,"maxima",6) == 0)
{
double
maxima,
minima;
(void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
statistic=maxima;
}
if (LocaleNCompare(symbol,"mean",4) == 0)
{
double
mean,
standard_deviation;
(void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
exception);
statistic=mean;
}
if (LocaleNCompare(symbol,"minima",6) == 0)
{
double
maxima,
minima;
(void) GetImageChannelRange(image,channel,&minima,&maxima,exception);
statistic=minima;
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
{
double
kurtosis,
skewness;
(void) GetImageChannelKurtosis(image,channel,&kurtosis,&skewness,
exception);
statistic=skewness;
}
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
{
double
mean,
standard_deviation;
(void) GetImageChannelMean(image,channel,&mean,&standard_deviation,
exception);
statistic=standard_deviation;
}
if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
return(0.0);
return(QuantumScale*statistic);
}
static double
FxEvaluateSubexpression(FxInfo *,const ChannelType,const ssize_t,
const ssize_t,const char *,const size_t,double *,ExceptionInfo *);
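/*
  IsFxFunction() returns MagickTrue when the expression begins with the
  named function and the character following the name is not whitespace
  (e.g. "channel(...)" matches; a bare name followed by a blank does not).
*/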
static inline MagickBooleanType IsFxFunction(const char *expression,
const char *name,const size_t length)
{
int
c;
register size_t
i;
for (i=0; i <= length; i++)
if (expression[i] == '\0')
return(MagickFalse);
c=expression[length];
if ((LocaleNCompare(expression,name,length) == 0) &&
((isspace(c) == 0) || (c == '(')))
return(MagickTrue);
return(MagickFalse);
}
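/* Greatest common divisor of alpha and beta via Euclid's recursion. */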
static MagickOffsetType FxGCD(MagickOffsetType alpha,MagickOffsetType beta)
{
if (beta != 0)
return(FxGCD(beta,alpha % beta));
return(alpha);
}
static inline const char *FxSubexpression(const char *expression,
ExceptionInfo *exception)
{
const char
*subexpression;
register ssize_t
level;
level=0;
subexpression=expression;
while ((*subexpression != '\0') &&
((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL)))
{
if (strchr("(",(int) *subexpression) != (char *) NULL)
level++;
else
if (strchr(")",(int) *subexpression) != (char *) NULL)
level--;
subexpression++;
}
if (*subexpression == '\0')
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnbalancedParenthesis","`%s'",expression);
return(subexpression);
}
static double FxGetSymbol(FxInfo *fx_info,const ChannelType channel,
const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
ExceptionInfo *exception)
{
char
*q,
symbol[MaxTextExtent];
const char
*p;
const double
*value;
double
alpha,
beta;
Image
*image;
MagickBooleanType
status;
MagickPixelPacket
pixel;
PointInfo
point;
register ssize_t
i;
size_t
level;
p=expression;
i=GetImageIndexInList(fx_info->images);
level=0;
point.x=(double) x;
point.y=(double) y;
if (isalpha((int) ((unsigned char) *(p+1))) == 0)
{
char
*subexpression;
subexpression=AcquireString(expression);
if (strchr("suv",(int) *p) != (char *) NULL)
{
switch (*p)
{
case 's':
default:
{
i=GetImageIndexInList(fx_info->images);
break;
}
case 'u': i=0; break;
case 'v': i=1; break;
}
p++;
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
i=(ssize_t) alpha;
if (*p != '\0')
p++;
}
if (*p == '.')
p++;
}
if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
{
p++;
if (*p == '{')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '{')
level++;
else
if (*p == '}')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
point.x=alpha;
point.y=beta;
if (*p != '\0')
p++;
}
else
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
point.x+=alpha;
point.y+=beta;
if (*p != '\0')
p++;
}
if (*p == '.')
p++;
}
subexpression=DestroyString(subexpression);
}
image=GetImageFromList(fx_info->images,i);
if (image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"NoSuchImage","`%s'",expression);
return(0.0);
}
i=GetImageIndexInList(image);
GetMagickPixelPacket(image,&pixel);
status=InterpolateMagickPixelPacket(image,fx_info->view[i],image->interpolate,
point.x,point.y,&pixel,exception);
(void) status;
if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
(LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
(LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
(LocaleCompare(p,"saturation") != 0) &&
(LocaleCompare(p,"lightness") != 0))
{
char
name[MaxTextExtent];
size_t
length;
(void) CopyMagickString(name,p,MaxTextExtent);
length=strlen(name);
for (q=name+length-1; q > name; q--)
{
if (*q == ')')
break;
if (*q == '.')
{
*q='\0';
break;
}
}
q=name;
if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
(GetFxSymbolValue(fx_info,name) == (const double *) NULL))
{
MagickPixelPacket
*color;
color=(MagickPixelPacket *) GetValueFromSplayTree(fx_info->colors,
name);
if (color != (MagickPixelPacket *) NULL)
{
pixel=(*color);
p+=length;
}
else
if (QueryMagickColor(name,&pixel,fx_info->exception) != MagickFalse)
{
(void) AddValueToSplayTree(fx_info->colors,ConstantString(name),
CloneMagickPixelPacket(&pixel));
p+=length;
}
}
}
(void) CopyMagickString(symbol,p,MaxTextExtent);
StripString(symbol);
if (*symbol == '\0')
{
switch (channel)
{
case RedChannel: return(QuantumScale*pixel.red);
case GreenChannel: return(QuantumScale*pixel.green);
case BlueChannel: return(QuantumScale*pixel.blue);
case OpacityChannel:
{
double
alpha;
if (pixel.matte == MagickFalse)
return(1.0);
alpha=(double) (QuantumScale*GetPixelAlpha(&pixel));
return(alpha);
}
case IndexChannel:
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ImageError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.index);
}
case DefaultChannels:
return(QuantumScale*GetMagickPixelIntensity(image,&pixel));
default:
break;
}
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",p);
return(0.0);
}
switch (*symbol)
{
case 'A':
case 'a':
{
if (LocaleCompare(symbol,"a") == 0)
return((double) (QuantumScale*GetPixelAlpha(&pixel)));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(symbol,"b") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(symbol,"channel",7) != MagickFalse)
{
GeometryInfo
channel_info;
MagickStatusType
flags;
flags=ParseGeometry(symbol+7,&channel_info);
if (image->colorspace == CMYKColorspace)
switch (channel)
{
case CyanChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case MagentaChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case YellowChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case BlackChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
case OpacityChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
default:
return(0.0);
}
switch (channel)
{
case RedChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case GreenChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case BlueChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case OpacityChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
case IndexChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
default:
return(0.0);
}
}
if (LocaleCompare(symbol,"c") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(symbol,"depth",5) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(symbol,"extent") == 0)
{
if (image->extent != 0)
return((double) image->extent);
return((double) GetBlobSize(image));
}
break;
}
case 'G':
case 'g':
{
if (LocaleCompare(symbol,"g") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'K':
case 'k':
{
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"k") == 0)
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.index);
}
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(symbol,"h") == 0)
return((double) image->rows);
if (LocaleCompare(symbol,"hue") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
return(hue);
}
break;
}
case 'I':
case 'i':
{
if ((LocaleCompare(symbol,"image.depth") == 0) ||
(LocaleCompare(symbol,"image.minima") == 0) ||
(LocaleCompare(symbol,"image.maxima") == 0) ||
(LocaleCompare(symbol,"image.mean") == 0) ||
(LocaleCompare(symbol,"image.kurtosis") == 0) ||
(LocaleCompare(symbol,"image.skewness") == 0) ||
(LocaleCompare(symbol,"image.standard_deviation") == 0))
return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
if (LocaleCompare(symbol,"image.resolution.x") == 0)
return(image->x_resolution);
if (LocaleCompare(symbol,"image.resolution.y") == 0)
return(image->y_resolution);
if (LocaleCompare(symbol,"intensity") == 0)
return(QuantumScale*GetMagickPixelIntensity(image,&pixel));
if (LocaleCompare(symbol,"i") == 0)
return((double) x);
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(symbol,"j") == 0)
return((double) y);
break;
}
case 'L':
case 'l':
{
if (LocaleCompare(symbol,"lightness") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
return(lightness);
}
if (LocaleCompare(symbol,"luma") == 0)
{
double
luma;
luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
return(QuantumScale*luma);
}
if (LocaleCompare(symbol,"luminance") == 0)
{
double
luminance;
luminance=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
return(QuantumScale*luminance);
}
break;
}
case 'M':
case 'm':
{
if (LocaleNCompare(symbol,"maxima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"mean",4) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"minima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"m") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'N':
case 'n':
{
if (LocaleCompare(symbol,"n") == 0)
return((double) GetImageListLength(fx_info->images));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(symbol,"o") == 0)
return(QuantumScale*pixel.opacity);
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(symbol,"page.height") == 0)
return((double) image->page.height);
if (LocaleCompare(symbol,"page.width") == 0)
return((double) image->page.width);
if (LocaleCompare(symbol,"page.x") == 0)
return((double) image->page.x);
if (LocaleCompare(symbol,"page.y") == 0)
return((double) image->page.y);
if (LocaleCompare(symbol,"printsize.x") == 0)
return(PerceptibleReciprocal(image->x_resolution)*image->columns);
if (LocaleCompare(symbol,"printsize.y") == 0)
return(PerceptibleReciprocal(image->y_resolution)*image->rows);
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(symbol,"quality") == 0)
return((double) image->quality);
break;
}
case 'R':
case 'r':
{
if (LocaleCompare(symbol,"resolution.x") == 0)
return(image->x_resolution);
if (LocaleCompare(symbol,"resolution.y") == 0)
return(image->y_resolution);
if (LocaleCompare(symbol,"r") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'S':
case 's':
{
if (LocaleCompare(symbol,"saturation") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(ClampToQuantum(pixel.red),ClampToQuantum(pixel.green),
ClampToQuantum(pixel.blue),&hue,&saturation,&lightness);
return(saturation);
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'T':
case 't':
{
if (LocaleCompare(symbol,"t") == 0)
return((double) GetImageIndexInList(fx_info->images));
break;
}
case 'W':
case 'w':
{
if (LocaleCompare(symbol,"w") == 0)
return((double) image->columns);
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(symbol,"y") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(symbol,"z") == 0)
{
double
depth;
depth=(double) GetImageChannelDepth(image,channel,fx_info->exception);
return(depth);
}
break;
}
default:
break;
}
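/*
Not a built-in symbol: fall back to the user-defined symbol table. An
unknown name raises OptionError once and is then defined as 0.0, so later
references to the same name resolve silently.
*/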
value=GetFxSymbolValue(fx_info,symbol);
if (value != (const double *) NULL)
return(*value);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UndefinedVariable","`%s'",symbol);
(void) SetFxSymbolValue(fx_info,symbol,0.0);
return(0.0);
}
static const char *FxOperatorPrecedence(const char *expression,
ExceptionInfo *exception)
{
typedef enum
{
UndefinedPrecedence,
NullPrecedence,
BitwiseComplementPrecedence,
ExponentPrecedence,
ExponentialNotationPrecedence,
MultiplyPrecedence,
AdditionPrecedence,
ShiftPrecedence,
RelationalPrecedence,
EquivalencyPrecedence,
BitwiseAndPrecedence,
BitwiseOrPrecedence,
LogicalAndPrecedence,
LogicalOrPrecedence,
TernaryPrecedence,
AssignmentPrecedence,
CommaPrecedence,
SeparatorPrecedence
} FxPrecedence;
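/*
Larger enumeration values bind more loosely. FxOperatorPrecedence() scans
the expression at parenthesis level 0 and returns a pointer to the loosest
binding operator found (the rightmost such operator for left-to-right
associativity, the leftmost for right-to-left); the caller splits the
expression at that point.
*/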
FxPrecedence
precedence,
target;
register const char
*subexpression;
register int
c;
size_t
level;
c=(-1);
level=0;
subexpression=(const char *) NULL;
target=NullPrecedence;
while ((c != '\0') && (*expression != '\0'))
{
precedence=UndefinedPrecedence;
if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
{
expression++;
continue;
}
switch (*expression)
{
case 'A':
case 'a':
{
#if defined(MAGICKCORE_HAVE_ACOSH)
if (IsFxFunction(expression,"acosh",5) != MagickFalse)
{
expression+=5;
break;
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (IsFxFunction(expression,"asinh",5) != MagickFalse)
{
expression+=5;
break;
}
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
if (IsFxFunction(expression,"atanh",5) != MagickFalse)
{
expression+=5;
break;
}
#endif
if (IsFxFunction(expression,"atan2",5) != MagickFalse)
{
expression+=5;
break;
}
break;
}
case 'E':
case 'e':
{
if ((isdigit(c) != 0) &&
((LocaleNCompare(expression,"E+",2) == 0) ||
(LocaleNCompare(expression,"E-",2) == 0)))
{
expression+=2; /* scientific notation */
break;
}
}
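/* no break: when not scientific notation, fall through to the next case */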
case 'J':
case 'j':
{
if ((IsFxFunction(expression,"j0",2) != MagickFalse) ||
(IsFxFunction(expression,"j1",2) != MagickFalse))
{
expression+=2;
break;
}
break;
}
case '#':
{
while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
expression++;
break;
}
default:
break;
}
if ((c == (int) '{') || (c == (int) '['))
level++;
else
if ((c == (int) '}') || (c == (int) ']'))
level--;
if (level == 0)
switch ((unsigned char) *expression)
{
case '~':
case '!':
{
precedence=BitwiseComplementPrecedence;
break;
}
case '^':
case '@':
{
precedence=ExponentPrecedence;
break;
}
default:
{
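/*
Detect implied multiplication: a digit or ')' immediately followed by a
lowercase letter or '(', or ')' followed by a digit; the letters 'x' and
'y' are exempt from this rule.
*/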
if (((c != 0) && ((isdigit(c) != 0) ||
(strchr(")",c) != (char *) NULL))) &&
(((islower((int) ((unsigned char) *expression)) != 0) ||
(strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
((isdigit(c) == 0) &&
(isdigit((int) ((unsigned char) *expression)) != 0))) &&
(strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
precedence=MultiplyPrecedence;
break;
}
case '*':
case '/':
case '%':
{
precedence=MultiplyPrecedence;
break;
}
case '+':
case '-':
{
if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
(isalpha(c) != 0))
precedence=AdditionPrecedence;
break;
}
case BitwiseAndAssignmentOperator:
case BitwiseOrAssignmentOperator:
case LeftShiftAssignmentOperator:
case RightShiftAssignmentOperator:
case PowerAssignmentOperator:
case ModuloAssignmentOperator:
case PlusAssignmentOperator:
case SubtractAssignmentOperator:
case MultiplyAssignmentOperator:
case DivideAssignmentOperator:
case IncrementAssignmentOperator:
case DecrementAssignmentOperator:
{
precedence=AssignmentPrecedence;
break;
}
case LeftShiftOperator:
case RightShiftOperator:
{
precedence=ShiftPrecedence;
break;
}
case '<':
case LessThanEqualOperator:
case GreaterThanEqualOperator:
case '>':
{
precedence=RelationalPrecedence;
break;
}
case EqualOperator:
case NotEqualOperator:
{
precedence=EquivalencyPrecedence;
break;
}
case '&':
{
precedence=BitwiseAndPrecedence;
break;
}
case '|':
{
precedence=BitwiseOrPrecedence;
break;
}
case LogicalAndOperator:
{
precedence=LogicalAndPrecedence;
break;
}
case LogicalOrOperator:
{
precedence=LogicalOrPrecedence;
break;
}
case ExponentialNotation:
{
precedence=ExponentialNotationPrecedence;
break;
}
case ':':
case '?':
{
precedence=TernaryPrecedence;
break;
}
case '=':
{
precedence=AssignmentPrecedence;
break;
}
case ',':
{
precedence=CommaPrecedence;
break;
}
case ';':
{
precedence=SeparatorPrecedence;
break;
}
}
if ((precedence == BitwiseComplementPrecedence) ||
(precedence == TernaryPrecedence) ||
(precedence == AssignmentPrecedence))
{
if (precedence > target)
{
/*
Right-to-left associativity.
*/
target=precedence;
subexpression=expression;
}
}
else
if (precedence >= target)
{
/*
Left-to-right associativity.
*/
target=precedence;
subexpression=expression;
}
if (strchr("(",(int) *expression) != (char *) NULL)
expression=FxSubexpression(expression,exception);
c=(int) (*expression++);
}
return(subexpression);
}
static double FxEvaluateSubexpression(FxInfo *fx_info,const ChannelType channel,
const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
double *beta,ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
subexpression=DestroyString(subexpression); \
return(value); \
}
#define FxParseConditional(subexpression,sentinel,p,q) \
{ \
p=subexpression; \
for (q=(char *) p; (*q != (sentinel)) && (*q != '\0'); q++) \
if (*q == '(') \
{ \
for (q++; (*q != ')') && (*q != '\0'); q++); \
if (*q == '\0') \
break; \
} \
if (*q == '\0') \
{ \
(void) ThrowMagickException(exception,GetMagickModule(), \
OptionError,"UnableToParseExpression","`%s'",subexpression); \
FxReturn(0.0); \
} \
if (strlen(q) == 1) \
*(q+1)='\0'; \
*q='\0'; \
}
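/*
For example, FxParseConditional(subexpression,',',p,q) on "u<0.5,u*2"
leaves p pointing at "u<0.5" (the comma is overwritten with '\0') and q+1
pointing at "u*2"; a sentinel inside one level of parentheses, as in
"max(u,v),u*2", is skipped while searching.
*/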
char
*q,
*subexpression;
double
alpha,
gamma,
sans,
value;
register const char
*p;
*beta=0.0;
sans=0.0;
subexpression=AcquireString(expression);
*subexpression='\0';
if (depth > FxMaxSubexpressionDepth)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",expression);
FxReturn(0.0);
}
if (exception->severity >= ErrorException)
FxReturn(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
FxReturn(0.0);
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) (~(size_t) *beta);
FxReturn(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
depth+1,beta,exception));
FxReturn(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(PerceptibleReciprocal(*beta)*alpha);
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha-(*beta));
}
case BitwiseAndAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case BitwiseOrAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case RightShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PowerAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=pow(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ModuloAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=fmod(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case PlusAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case SubtractAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case MultiplyAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DivideAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*PerceptibleReciprocal(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case IncrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case DecrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
return(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
FxReturn(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
FxReturn(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case '?':
{
double
gamma;
(void) CopyMagickString(subexpression,++p,MaxTextExtent-1);
FxParseConditional(subexpression,':',p,q);
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(gamma);
}
case '=':
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
return(0.0);
FxReturn(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
beta,exception);
FxReturn(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
size_t
length;
if (depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
length=CopyMagickString(subexpression,expression+1,MaxTextExtent);
if (length != 0)
subexpression[length-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
FxReturn(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (IsFxFunction(expression,"abs",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (IsFxFunction(expression,"acosh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(acosh(alpha));
}
#endif
if (IsFxFunction(expression,"acos",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"airy",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (IsFxFunction(expression,"asinh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(asinh(alpha));
}
#endif
if (IsFxFunction(expression,"asin",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(asin(alpha));
}
if (IsFxFunction(expression,"alt",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"atan2",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (IsFxFunction(expression,"atanh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atanh(alpha));
}
#endif
if (IsFxFunction(expression,"atan",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(expression,"ceil",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(ceil(alpha));
}
if (IsFxFunction(expression,"clamp",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha < 0.0)
FxReturn(0.0);
if (alpha > 1.0)
FxReturn(1.0);
FxReturn(alpha);
}
if (IsFxFunction(expression,"cosh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(cosh(alpha));
}
if (IsFxFunction(expression,"cos",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'D':
case 'd':
{
if (IsFxFunction(expression,"debug",5) != MagickFalse)
{
const char
*type;
size_t
length;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
switch (fx_info->images->colorspace)
{
case CMYKColorspace:
{
switch (channel)
{
case CyanChannel: type="cyan"; break;
case MagentaChannel: type="magenta"; break;
case YellowChannel: type="yellow"; break;
case AlphaChannel: type="alpha"; break;
case BlackChannel: type="black"; break;
default: type="unknown"; break;
}
break;
}
case GRAYColorspace:
{
switch (channel)
{
case RedChannel: type="gray"; break;
case AlphaChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
default:
{
switch (channel)
{
case RedChannel: type="red"; break;
case GreenChannel: type="green"; break;
case BlueChannel: type="blue"; break;
case AlphaChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
}
*subexpression='\0';
length=1;
if (strlen(expression) > 6)
length=CopyMagickString(subexpression,expression+6,MaxTextExtent);
if (length != 0)
subexpression[length-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,
"%s[%.20g,%.20g].%s: %s=%.*g\n",fx_info->images->filename,
(double) x,(double) y,type,subexpression,GetMagickPrecision(),
(double) alpha);
FxReturn(alpha);
}
if (IsFxFunction(expression,"do",2) != MagickFalse)
{
size_t
length;
/*
Parse do(expression,condition test).
*/
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
}
FxReturn(alpha);
}
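/*
Illustrative: with t previously 0, "do(t=t+1,t<3)" evaluates t=t+1, then
tests t<3, repeating until the test fails; it returns 3.0, the last value
of the first argument.
*/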
if (IsFxFunction(expression,"drc",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (IsFxFunction(expression,"erf",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(erf(alpha));
}
#endif
if (IsFxFunction(expression,"exp",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
FxReturn(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (IsFxFunction(expression,"floor",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"for",3) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
/*
Parse for(initialization, condition test, expression).
*/
length=CopyMagickString(subexpression,expression+4,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
}
FxReturn(alpha);
}
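/*
Illustrative: "for(t=0,t<3,t=t+1)" runs the initialization once, then
alternates the condition test and the body, returning the last body value
(here 3.0).
*/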
break;
}
case 'G':
case 'g':
{
if (IsFxFunction(expression,"gauss",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI));
}
if (IsFxFunction(expression,"gcd",3) != MagickFalse)
{
MagickOffsetType
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
gcd=FxGCD((MagickOffsetType) (alpha+0.5),(MagickOffsetType) (*beta+
0.5));
FxReturn((double) gcd);
}
if (LocaleCompare(expression,"g") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleCompare(expression,"hue") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"hypot",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'I':
case 'i':
{
if (IsFxFunction(expression,"if",2) != MagickFalse)
{
double
sans = 0.0;
size_t
length;
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
if (fabs(alpha) >= MagickEpsilon)
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(alpha);
}
if (LocaleCompare(expression,"intensity") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"int",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"isnan",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (IsFxFunction(expression,"j0",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"j1",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"jinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha)));
}
#endif
break;
}
case 'L':
case 'l':
{
if (IsFxFunction(expression,"ln",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(log(alpha));
}
if (IsFxFunction(expression,"logtwo",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn(log10(alpha)/log10(2.0));
}
if (IsFxFunction(expression,"log",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
FxReturn((double) QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (IsFxFunction(expression,"max",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (IsFxFunction(expression,"min",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha < *beta ? alpha : *beta);
}
if (IsFxFunction(expression,"mod",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta));
}
if (LocaleCompare(expression,"m") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'N':
case 'n':
{
if (IsFxFunction(expression,"not",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
FxReturn(1.0);
if (LocaleCompare(expression,"o") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
FxReturn(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
FxReturn(MagickPI);
if (IsFxFunction(expression,"pow",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
FxReturn((double) QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
FxReturn(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (IsFxFunction(expression,"rand",4) != MagickFalse)
{
double
alpha;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
FxReturn(alpha);
}
if (IsFxFunction(expression,"round",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if ((alpha-floor(alpha)) < (ceil(alpha)-alpha))
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"r") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"sign",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(alpha < 0.0 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"sinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0)
FxReturn(1.0);
FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha));
}
if (IsFxFunction(expression,"sinh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sinh(alpha));
}
if (IsFxFunction(expression,"sin",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(sin(alpha));
}
if (IsFxFunction(expression,"sqrt",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sqrt(alpha));
}
if (IsFxFunction(expression,"squish",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'T':
case 't':
{
if (IsFxFunction(expression,"tanh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(tanh(alpha));
}
if (IsFxFunction(expression,"tan",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
FxReturn(0.0);
if (IsFxFunction(expression,"trunc",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha >= 0.0)
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'W':
case 'w':
{
if (IsFxFunction(expression,"while",5) != MagickFalse)
{
size_t
length;
/*
Parse while(condition,expression).
*/
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
}
FxReturn(alpha);
}
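/*
Illustrative: with t previously 0, "while(t<10,t=t+3)" tests the condition
before each step and returns 12.0, the last value produced by the body.
*/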
if (LocaleCompare(expression,"w") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
default:
break;
}
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception);
FxReturn(alpha);
}
MagickExport MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
double *alpha,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
return(status);
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
double *alpha,ExceptionInfo *exception)
{
FILE
*file;
MagickBooleanType
status;
file=fx_info->file;
fx_info->file=(FILE *) NULL;
status=FxEvaluateChannelExpression(fx_info,GrayChannel,0,0,alpha,exception);
fx_info->file=file;
return(status);
}
MagickExport MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
const ChannelType channel,const ssize_t x,const ssize_t y,double *alpha,
ExceptionInfo *exception)
{
double
beta;
beta=0.0;
*alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
&beta,exception);
return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
% Image *FxImageChannel(const Image *image,const ChannelType channel,
% const char *expression,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
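/*
A minimal usage sketch (illustrative only; assumes `image' and `exception'
were acquired elsewhere):

Image
*average;

average=FxImageChannel(image,DefaultChannels,"(u+v)/2",exception);
if (average != (Image *) NULL)
{ use average, then DestroyImage(average); }

In fx expressions, "u" refers to the first image in the list and "v" to
the second.
*/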
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
register ssize_t
i;
assert(fx_info != (FxInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (fx_info[i] != (FxInfo *) NULL)
fx_info[i]=DestroyFxInfo(fx_info[i]);
fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
return(fx_info);
}
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
ExceptionInfo *exception)
{
char
*fx_expression;
double
alpha;
FxInfo
**fx_info;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
if (fx_info == (FxInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return((FxInfo **) NULL);
}
(void) memset(fx_info,0,number_threads*sizeof(*fx_info));
if (*expression != '@')
fx_expression=ConstantString(expression);
else
fx_expression=FileToString(expression+1,~0UL,exception);
for (i=0; i < (ssize_t) number_threads; i++)
{
MagickBooleanType
status;
fx_info[i]=AcquireFxInfo(image,fx_expression);
if (fx_info[i] == (FxInfo *) NULL)
break;
status=FxPreprocessExpression(fx_info[i],&alpha,exception);
if (status == MagickFalse)
break;
}
fx_expression=DestroyString(fx_expression);
if (i < (ssize_t) number_threads)
fx_info=DestroyFxThreadSet(fx_info);
return(fx_info);
}
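/*
One FxInfo per potential worker thread: FxImageChannel() indexes this set
with GetOpenMPThreadId(), since each FxInfo carries mutable state (the
symbol table, random_info) that cannot safely be shared across threads.
*/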
MagickExport Image *FxImage(const Image *image,const char *expression,
ExceptionInfo *exception)
{
Image
*fx_image;
fx_image=FxImageChannel(image,GrayChannel,expression,exception);
return(fx_image);
}
MagickExport Image *FxImageChannel(const Image *image,const ChannelType channel,
const char *expression,ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"
CacheView
*fx_view;
FxInfo
**magick_restrict fx_info;
Image
*fx_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (expression == (const char *) NULL)
return(CloneImage(image,0,0,MagickTrue,exception));
fx_info=AcquireFxThreadSet(image,expression,exception);
if (fx_info == (FxInfo **) NULL)
return((Image *) NULL);
fx_image=CloneImage(image,0,0,MagickTrue,exception);
if (fx_image == (Image *) NULL)
{
fx_info=DestroyFxThreadSet(fx_info);
return((Image *) NULL);
}
if (SetImageStorageClass(fx_image,DirectClass) == MagickFalse)
{
InheritException(exception,&fx_image->exception);
fx_info=DestroyFxThreadSet(fx_info);
fx_image=DestroyImage(fx_image);
return((Image *) NULL);
}
/*
Fx image.
*/
status=MagickTrue;
progress=0;
fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(progress,status) \
magick_number_threads(image,fx_image,fx_image->rows,1)
#endif
for (y=0; y < (ssize_t) fx_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
double
alpha;
register IndexPacket
*magick_restrict fx_indexes;
register ssize_t
x;
register PixelPacket
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
fx_indexes=GetCacheViewAuthenticIndexQueue(fx_view);
alpha=0.0;
for (x=0; x < (ssize_t) fx_image->columns; x++)
{
if ((channel & RedChannel) != 0)
{
(void) FxEvaluateChannelExpression(fx_info[id],RedChannel,x,y,
&alpha,exception);
SetPixelRed(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
}
if ((channel & GreenChannel) != 0)
{
(void) FxEvaluateChannelExpression(fx_info[id],GreenChannel,x,y,
&alpha,exception);
SetPixelGreen(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
}
if ((channel & BlueChannel) != 0)
{
(void) FxEvaluateChannelExpression(fx_info[id],BlueChannel,x,y,
&alpha,exception);
SetPixelBlue(q,ClampToQuantum((MagickRealType) QuantumRange*alpha));
}
if ((channel & OpacityChannel) != 0)
{
(void) FxEvaluateChannelExpression(fx_info[id],OpacityChannel,x,y,
&alpha,exception);
if (image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum((MagickRealType) QuantumRange*
alpha));
else
SetPixelOpacity(q,ClampToQuantum((MagickRealType) (QuantumRange-
QuantumRange*alpha)));
}
if (((channel & IndexChannel) != 0) &&
(fx_image->colorspace == CMYKColorspace))
{
(void) FxEvaluateChannelExpression(fx_info[id],IndexChannel,x,y,
&alpha,exception);
SetPixelIndex(fx_indexes+x,ClampToQuantum((MagickRealType)
QuantumRange*alpha));
}
q++;
}
if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
fx_view=DestroyCacheView(fx_view);
fx_info=DestroyFxThreadSet(fx_info);
if (status == MagickFalse)
fx_image=DestroyImage(fx_image);
return(fx_image);
}
|
GB_unaryop__identity_bool_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_bool_uint16
// op(A') function: GB_tran__identity_bool_uint16
// C type: bool
// A type: uint16_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
bool z = (bool) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
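// For this type pair, GB_CAST_OP(pC,pA) therefore expands, in effect, to:
// uint16_t aij = Ax [pA] ;
// bool z = (bool) aij ;
// Cx [pC] = z ;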
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_bool_uint16
(
bool *restrict Cx,
const uint16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_bool_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
rhs4sg_revNW.c | // SW4 LICENSE
// # ----------------------------------------------------------------------
// # SW4 - Seismic Waves, 4th order
// # ----------------------------------------------------------------------
// # Copyright (c) 2013, Lawrence Livermore National Security, LLC.
// # Produced at the Lawrence Livermore National Laboratory.
// #
// # Written by:
// # N. Anders Petersson (petersson1@llnl.gov)
// # Bjorn Sjogreen (sjogreen2@llnl.gov)
// #
// # LLNL-CODE-643337
// #
// # All rights reserved.
// #
// # This file is part of SW4, Version: 1.0
// #
// # Please also read LICENCE.txt, which contains "Our Notice and GNU General Public License"
// #
// # This program is free software; you can redistribute it and/or modify
// # it under the terms of the GNU General Public License (as published by
// # the Free Software Foundation) version 2, dated June 1991.
// #
// # This program is distributed in the hope that it will be useful, but
// # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
// # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
// # conditions of the GNU General Public License for more details.
// #
// # You should have received a copy of the GNU General Public License
// # along with this program; if not, write to the Free Software
// # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
#include "sw4.h"
#include <iostream>
using namespace std;
// restrict qualifier does not seem to help much
//void rhs4sg_rev( int ifirst, int ilast, int jfirst, int jlast, int kfirst, int klast,
// int nk, int* onesided, float_sw4* a_acof, float_sw4 * a_bope,
// float_sw4* a_ghcof, float_sw4* a_lu, float_sw4* a_u,
// float_sw4* a_mu, float_sw4* a_lambda,
// float_sw4 h, float_sw4* a_strx, float_sw4* a_stry,
// float_sw4* a_strz )
void rhs4sg_rev( int ifirst, int ilast, int jfirst, int jlast, int kfirst, int klast,
int nk, int* __restrict__ onesided, float_sw4* __restrict__ a_acof, float_sw4 *__restrict__ a_bope,
float_sw4* __restrict__ a_ghcof, float_sw4* __restrict__ a_lu, float_sw4* __restrict__ a_u,
float_sw4* __restrict__ a_mu, float_sw4* __restrict__ a_lambda,
float_sw4 h, float_sw4* __restrict__ a_strx, float_sw4* __restrict__ a_stry,
float_sw4* __restrict__ a_strz )
{
// This would work to create multi-dimensional C arrays:
// float_sw4** b_ar=(float_sw4**)malloc(ni*nj*sizeof(float_sw4*));
// for( int j=0;j<nj;j++)
// b_ar[j] = &a_lu[j-1+ni*(1-1)];
//#define ar(i,j) b_ar[j][i]
// #include <iostream>
// Direct reuse of Fortran code by these macro definitions:
#define mu(i,j,k) a_mu[base+i+ni*(j)+nij*(k)]
#define la(i,j,k) a_lambda[base+i+ni*(j)+nij*(k)]
// Reversed indexation
#define u(c,i,j,k) a_u[base3+i+ni*(j)+nij*(k)+nijk*(c)]
#define lu(c,i,j,k) a_lu[base3+i+ni*(j)+nij*(k)+nijk*(c)]
#define strx(i) a_strx[i-ifirst0]
#define stry(j) a_stry[j-jfirst0]
#define strz(k) a_strz[k-kfirst0]
#define acof(i,j,k) a_acof[(i-1)+6*(j-1)+48*(k-1)]
#define bope(i,j) a_bope[i-1+6*(j-1)]
#define ghcof(i) a_ghcof[i-1]
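// Example of the flattened layout: u(c,i,j,k) addresses
// a_u[base3 + i + ni*j + nij*k + nijk*c]. base = -(ifirst+ni*jfirst+nij*kfirst)
// shifts the (ifirst,jfirst,kfirst) corner to offset zero, and
// base3 = base - nijk maps the 1-based component index c=1,2,3 onto
// zero-based slabs of size nijk.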
const float_sw4 a1 = 0;
const float_sw4 i6 = 1.0/6;
const float_sw4 i12 = 1.0/12;
const float_sw4 i144 = 1.0/144;
const float_sw4 tf = 0.75;
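// i6, i12 and i144 are precomputed reciprocals of the fourth-order stencil
// denominators, so the inner loops multiply instead of divide; tf = 3/4 is
// a stencil coefficient.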
const int ni = ilast-ifirst+1;
const int nij = ni*(jlast-jfirst+1);
const int nijk = nij*(klast-kfirst+1);
const int base = -(ifirst+ni*jfirst+nij*kfirst);
const int base3 = base-nijk;
const int nic = 3*ni;
const int nijc = 3*nij;
const int ifirst0 = ifirst;
const int jfirst0 = jfirst;
const int kfirst0 = kfirst;
int k1, k2, kb;
int i, j, k, q, m, qb, mb;
float_sw4 mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
float_sw4 r1, r2, r3, mucof, mu1zz, mu2zz, mu3zz;
float_sw4 lap2mu, u3zip2, u3zip1, u3zim1, u3zim2, lau3zx, mu3xz, u3zjp2, u3zjp1, u3zjm1, u3zjm2;
float_sw4 lau3zy, mu3yz, mu1zx, mu2zy, u1zip2, u1zip1, u1zim1, u1zim2;
float_sw4 u2zjp2, u2zjp1, u2zjm1, u2zjm2, lau1xz, lau2yz;
const float_sw4 cof = 1.0/(h*h);
k1 = kfirst+2;
if( onesided[4] == 1 )
k1 = 7;
k2 = klast-2;
if( onesided[5] == 1 )
k2 = nk-6;
// cout << "k1=" << k1 << " k2=" << k2 << " jfirst=" << jfirst << " jlast="<<jlast<<" ifirst="<<ifirst<<" ilast="<<ilast<< endl;
// printf("k1=%d,k2=%d,jfirst=%d,jlast=%d,ifirst=%d,ilast=%d\n",k1,k2,jfirst,jlast,ifirst,ilast);
#pragma omp parallel private(k,i,j,mux1,mux2,mux3,mux4,muy1,muy2,muy3,muy4,\
r1,r2,r3,mucof,mu1zz,mu2zz,mu3zz,lap2mu,q,u3zip2,u3zip1,\
u3zim1,u3zim2,lau3zx,mu3xz,u3zjp2,u3zjp1,u3zjm1,u3zjm2,lau3zy,\
mu3yz,mu1zx,u1zip2,u1zip1,u1zim1,u1zim2,\
u2zjp2,u2zjp1,u2zjm1,u2zjm2,mu2zy,lau1xz,lau2yz,kb,qb,mb,muz1,muz2,muz3,muz4)
{
#pragma omp for
for( k= k1; k <= k2 ; k++ )
for( j=jfirst+2; j <= jlast-2 ; j++ )
#pragma simd
#pragma ivdep
for( i=ifirst+2; i <= ilast-2 ; i++ )
{
/* from inner_loop_4a, 28x3 = 84 ops */
mux1 = mu(i-1,j,k)*strx(i-1)-
tf*(mu(i,j,k)*strx(i)+mu(i-2,j,k)*strx(i-2));
mux2 = mu(i-2,j,k)*strx(i-2)+mu(i+1,j,k)*strx(i+1)+
3*(mu(i,j,k)*strx(i)+mu(i-1,j,k)*strx(i-1));
mux3 = mu(i-1,j,k)*strx(i-1)+mu(i+2,j,k)*strx(i+2)+
3*(mu(i+1,j,k)*strx(i+1)+mu(i,j,k)*strx(i));
mux4 = mu(i+1,j,k)*strx(i+1)-
tf*(mu(i,j,k)*strx(i)+mu(i+2,j,k)*strx(i+2));
muy1 = mu(i,j-1,k)*stry(j-1)-
tf*(mu(i,j,k)*stry(j)+mu(i,j-2,k)*stry(j-2));
muy2 = mu(i,j-2,k)*stry(j-2)+mu(i,j+1,k)*stry(j+1)+
3*(mu(i,j,k)*stry(j)+mu(i,j-1,k)*stry(j-1));
muy3 = mu(i,j-1,k)*stry(j-1)+mu(i,j+2,k)*stry(j+2)+
3*(mu(i,j+1,k)*stry(j+1)+mu(i,j,k)*stry(j));
muy4 = mu(i,j+1,k)*stry(j+1)-
tf*(mu(i,j,k)*stry(j)+mu(i,j+2,k)*stry(j+2));
muz1 = mu(i,j,k-1)*strz(k-1)-
tf*(mu(i,j,k)*strz(k)+mu(i,j,k-2)*strz(k-2));
muz2 = mu(i,j,k-2)*strz(k-2)+mu(i,j,k+1)*strz(k+1)+
3*(mu(i,j,k)*strz(k)+mu(i,j,k-1)*strz(k-1));
muz3 = mu(i,j,k-1)*strz(k-1)+mu(i,j,k+2)*strz(k+2)+
3*(mu(i,j,k+1)*strz(k+1)+mu(i,j,k)*strz(k));
muz4 = mu(i,j,k+1)*strz(k+1)-
tf*(mu(i,j,k)*strz(k)+mu(i,j,k+2)*strz(k+2));
/* xx, yy, and zz derivatives:*/
/* 75 ops */
lu(1,i,j,k) = i6*( strx(i)*( (2*mux1+la(i-1,j,k)*strx(i-1)-
tf*(la(i,j,k)*strx(i)+la(i-2,j,k)*strx(i-2)))*
(u(1,i-2,j,k)-u(1,i,j,k))+
(2*mux2+la(i-2,j,k)*strx(i-2)+la(i+1,j,k)*strx(i+1)+
3*(la(i,j,k)*strx(i)+la(i-1,j,k)*strx(i-1)))*
(u(1,i-1,j,k)-u(1,i,j,k))+
(2*mux3+la(i-1,j,k)*strx(i-1)+la(i+2,j,k)*strx(i+2)+
3*(la(i+1,j,k)*strx(i+1)+la(i,j,k)*strx(i)))*
(u(1,i+1,j,k)-u(1,i,j,k))+
(2*mux4+ la(i+1,j,k)*strx(i+1)-
tf*(la(i,j,k)*strx(i)+la(i+2,j,k)*strx(i+2)))*
(u(1,i+2,j,k)-u(1,i,j,k)) ) + stry(j)*(
muy1*(u(1,i,j-2,k)-u(1,i,j,k)) +
muy2*(u(1,i,j-1,k)-u(1,i,j,k)) +
muy3*(u(1,i,j+1,k)-u(1,i,j,k)) +
muy4*(u(1,i,j+2,k)-u(1,i,j,k)) ) + strz(k)*(
muz1*(u(1,i,j,k-2)-u(1,i,j,k)) +
muz2*(u(1,i,j,k-1)-u(1,i,j,k)) +
muz3*(u(1,i,j,k+1)-u(1,i,j,k)) +
muz4*(u(1,i,j,k+2)-u(1,i,j,k)) ) );
/* 75 ops */
lu(2,i,j,k) = i6*( strx(i)*(mux1*(u(2,i-2,j,k)-u(2,i,j,k)) +
mux2*(u(2,i-1,j,k)-u(2,i,j,k)) +
mux3*(u(2,i+1,j,k)-u(2,i,j,k)) +
mux4*(u(2,i+2,j,k)-u(2,i,j,k)) ) + stry(j)*(
(2*muy1+la(i,j-1,k)*stry(j-1)-
tf*(la(i,j,k)*stry(j)+la(i,j-2,k)*stry(j-2)))*
(u(2,i,j-2,k)-u(2,i,j,k))+
(2*muy2+la(i,j-2,k)*stry(j-2)+la(i,j+1,k)*stry(j+1)+
3*(la(i,j,k)*stry(j)+la(i,j-1,k)*stry(j-1)))*
(u(2,i,j-1,k)-u(2,i,j,k))+
(2*muy3+la(i,j-1,k)*stry(j-1)+la(i,j+2,k)*stry(j+2)+
3*(la(i,j+1,k)*stry(j+1)+la(i,j,k)*stry(j)))*
(u(2,i,j+1,k)-u(2,i,j,k))+
(2*muy4+la(i,j+1,k)*stry(j+1)-
tf*(la(i,j,k)*stry(j)+la(i,j+2,k)*stry(j+2)))*
(u(2,i,j+2,k)-u(2,i,j,k)) ) + strz(k)*(
muz1*(u(2,i,j,k-2)-u(2,i,j,k)) +
muz2*(u(2,i,j,k-1)-u(2,i,j,k)) +
muz3*(u(2,i,j,k+1)-u(2,i,j,k)) +
muz4*(u(2,i,j,k+2)-u(2,i,j,k)) ) );
/* 75 ops */
lu(3,i,j,k) = i6*( strx(i)*(mux1*(u(3,i-2,j,k)-u(3,i,j,k)) +
mux2*(u(3,i-1,j,k)-u(3,i,j,k)) +
mux3*(u(3,i+1,j,k)-u(3,i,j,k)) +
mux4*(u(3,i+2,j,k)-u(3,i,j,k)) ) + stry(j)*(
muy1*(u(3,i,j-2,k)-u(3,i,j,k)) +
muy2*(u(3,i,j-1,k)-u(3,i,j,k)) +
muy3*(u(3,i,j+1,k)-u(3,i,j,k)) +
muy4*(u(3,i,j+2,k)-u(3,i,j,k)) ) + strz(k)*(
(2*muz1+la(i,j,k-1)*strz(k-1)-
tf*(la(i,j,k)*strz(k)+la(i,j,k-2)*strz(k-2)))*
(u(3,i,j,k-2)-u(3,i,j,k))+
(2*muz2+la(i,j,k-2)*strz(k-2)+la(i,j,k+1)*strz(k+1)+
3*(la(i,j,k)*strz(k)+la(i,j,k-1)*strz(k-1)))*
(u(3,i,j,k-1)-u(3,i,j,k))+
(2*muz3+la(i,j,k-1)*strz(k-1)+la(i,j,k+2)*strz(k+2)+
3*(la(i,j,k+1)*strz(k+1)+la(i,j,k)*strz(k)))*
(u(3,i,j,k+1)-u(3,i,j,k))+
(2*muz4+la(i,j,k+1)*strz(k+1)-
tf*(la(i,j,k)*strz(k)+la(i,j,k+2)*strz(k+2)))*
(u(3,i,j,k+2)-u(3,i,j,k)) ) );
/* Mixed derivatives: */
/* 29ops /mixed derivative */
/* 116 ops for r1 */
/* (la*v_y)_x */
lu(1,i,j,k) = lu(1,i,j,k) + strx(i)*stry(j)*
i144*( la(i-2,j,k)*(u(2,i-2,j-2,k)-u(2,i-2,j+2,k)+
8*(-u(2,i-2,j-1,k)+u(2,i-2,j+1,k))) - 8*(
la(i-1,j,k)*(u(2,i-1,j-2,k)-u(2,i-1,j+2,k)+
8*(-u(2,i-1,j-1,k)+u(2,i-1,j+1,k))) )+8*(
la(i+1,j,k)*(u(2,i+1,j-2,k)-u(2,i+1,j+2,k)+
8*(-u(2,i+1,j-1,k)+u(2,i+1,j+1,k))) ) - (
la(i+2,j,k)*(u(2,i+2,j-2,k)-u(2,i+2,j+2,k)+
8*(-u(2,i+2,j-1,k)+u(2,i+2,j+1,k))) ))
/* (la*w_z)_x */
+ strx(i)*strz(k)*
i144*( la(i-2,j,k)*(u(3,i-2,j,k-2)-u(3,i-2,j,k+2)+
8*(-u(3,i-2,j,k-1)+u(3,i-2,j,k+1))) - 8*(
la(i-1,j,k)*(u(3,i-1,j,k-2)-u(3,i-1,j,k+2)+
8*(-u(3,i-1,j,k-1)+u(3,i-1,j,k+1))) )+8*(
la(i+1,j,k)*(u(3,i+1,j,k-2)-u(3,i+1,j,k+2)+
8*(-u(3,i+1,j,k-1)+u(3,i+1,j,k+1))) ) - (
la(i+2,j,k)*(u(3,i+2,j,k-2)-u(3,i+2,j,k+2)+
8*(-u(3,i+2,j,k-1)+u(3,i+2,j,k+1))) ))
/* (mu*v_x)_y */
+ strx(i)*stry(j)*
i144*( mu(i,j-2,k)*(u(2,i-2,j-2,k)-u(2,i+2,j-2,k)+
8*(-u(2,i-1,j-2,k)+u(2,i+1,j-2,k))) - 8*(
mu(i,j-1,k)*(u(2,i-2,j-1,k)-u(2,i+2,j-1,k)+
8*(-u(2,i-1,j-1,k)+u(2,i+1,j-1,k))) )+8*(
mu(i,j+1,k)*(u(2,i-2,j+1,k)-u(2,i+2,j+1,k)+
8*(-u(2,i-1,j+1,k)+u(2,i+1,j+1,k))) ) - (
mu(i,j+2,k)*(u(2,i-2,j+2,k)-u(2,i+2,j+2,k)+
8*(-u(2,i-1,j+2,k)+u(2,i+1,j+2,k))) ))
/* (mu*w_x)_z */
+ strx(i)*strz(k)*
i144*( mu(i,j,k-2)*(u(3,i-2,j,k-2)-u(3,i+2,j,k-2)+
8*(-u(3,i-1,j,k-2)+u(3,i+1,j,k-2))) - 8*(
mu(i,j,k-1)*(u(3,i-2,j,k-1)-u(3,i+2,j,k-1)+
8*(-u(3,i-1,j,k-1)+u(3,i+1,j,k-1))) )+8*(
mu(i,j,k+1)*(u(3,i-2,j,k+1)-u(3,i+2,j,k+1)+
8*(-u(3,i-1,j,k+1)+u(3,i+1,j,k+1))) ) - (
mu(i,j,k+2)*(u(3,i-2,j,k+2)-u(3,i+2,j,k+2)+
8*(-u(3,i-1,j,k+2)+u(3,i+1,j,k+2))) )) ;
lu(1,i,j,k) = cof*lu(1,i,j,k);
/* 116 ops for r2 */
/* (mu*u_y)_x */
lu(2,i,j,k) = lu(2,i,j,k) + strx(i)*stry(j)*
i144*( mu(i-2,j,k)*(u(1,i-2,j-2,k)-u(1,i-2,j+2,k)+
8*(-u(1,i-2,j-1,k)+u(1,i-2,j+1,k))) - 8*(
mu(i-1,j,k)*(u(1,i-1,j-2,k)-u(1,i-1,j+2,k)+
8*(-u(1,i-1,j-1,k)+u(1,i-1,j+1,k))) )+8*(
mu(i+1,j,k)*(u(1,i+1,j-2,k)-u(1,i+1,j+2,k)+
8*(-u(1,i+1,j-1,k)+u(1,i+1,j+1,k))) ) - (
mu(i+2,j,k)*(u(1,i+2,j-2,k)-u(1,i+2,j+2,k)+
8*(-u(1,i+2,j-1,k)+u(1,i+2,j+1,k))) ))
/* (la*u_x)_y */
+ strx(i)*stry(j)*
i144*( la(i,j-2,k)*(u(1,i-2,j-2,k)-u(1,i+2,j-2,k)+
8*(-u(1,i-1,j-2,k)+u(1,i+1,j-2,k))) - 8*(
la(i,j-1,k)*(u(1,i-2,j-1,k)-u(1,i+2,j-1,k)+
8*(-u(1,i-1,j-1,k)+u(1,i+1,j-1,k))) )+8*(
la(i,j+1,k)*(u(1,i-2,j+1,k)-u(1,i+2,j+1,k)+
8*(-u(1,i-1,j+1,k)+u(1,i+1,j+1,k))) ) - (
la(i,j+2,k)*(u(1,i-2,j+2,k)-u(1,i+2,j+2,k)+
8*(-u(1,i-1,j+2,k)+u(1,i+1,j+2,k))) ))
/* (la*w_z)_y */
+ stry(j)*strz(k)*
i144*( la(i,j-2,k)*(u(3,i,j-2,k-2)-u(3,i,j-2,k+2)+
8*(-u(3,i,j-2,k-1)+u(3,i,j-2,k+1))) - 8*(
la(i,j-1,k)*(u(3,i,j-1,k-2)-u(3,i,j-1,k+2)+
8*(-u(3,i,j-1,k-1)+u(3,i,j-1,k+1))) )+8*(
la(i,j+1,k)*(u(3,i,j+1,k-2)-u(3,i,j+1,k+2)+
8*(-u(3,i,j+1,k-1)+u(3,i,j+1,k+1))) ) - (
la(i,j+2,k)*(u(3,i,j+2,k-2)-u(3,i,j+2,k+2)+
8*(-u(3,i,j+2,k-1)+u(3,i,j+2,k+1))) ))
/* (mu*w_y)_z */
+ stry(j)*strz(k)*
i144*( mu(i,j,k-2)*(u(3,i,j-2,k-2)-u(3,i,j+2,k-2)+
8*(-u(3,i,j-1,k-2)+u(3,i,j+1,k-2))) - 8*(
mu(i,j,k-1)*(u(3,i,j-2,k-1)-u(3,i,j+2,k-1)+
8*(-u(3,i,j-1,k-1)+u(3,i,j+1,k-1))) )+8*(
mu(i,j,k+1)*(u(3,i,j-2,k+1)-u(3,i,j+2,k+1)+
8*(-u(3,i,j-1,k+1)+u(3,i,j+1,k+1))) ) - (
mu(i,j,k+2)*(u(3,i,j-2,k+2)-u(3,i,j+2,k+2)+
8*(-u(3,i,j-1,k+2)+u(3,i,j+1,k+2))) )) ;
lu(2,i,j,k) = cof*lu(2,i,j,k);
/* 116 ops for r3 */
/* (mu*u_z)_x */
lu(3,i,j,k) = lu(3,i,j,k) + strx(i)*strz(k)*
i144*( mu(i-2,j,k)*(u(1,i-2,j,k-2)-u(1,i-2,j,k+2)+
8*(-u(1,i-2,j,k-1)+u(1,i-2,j,k+1))) - 8*(
mu(i-1,j,k)*(u(1,i-1,j,k-2)-u(1,i-1,j,k+2)+
8*(-u(1,i-1,j,k-1)+u(1,i-1,j,k+1))) )+8*(
mu(i+1,j,k)*(u(1,i+1,j,k-2)-u(1,i+1,j,k+2)+
8*(-u(1,i+1,j,k-1)+u(1,i+1,j,k+1))) ) - (
mu(i+2,j,k)*(u(1,i+2,j,k-2)-u(1,i+2,j,k+2)+
8*(-u(1,i+2,j,k-1)+u(1,i+2,j,k+1))) ))
/* (mu*v_z)_y */
+ stry(j)*strz(k)*
i144*( mu(i,j-2,k)*(u(2,i,j-2,k-2)-u(2,i,j-2,k+2)+
8*(-u(2,i,j-2,k-1)+u(2,i,j-2,k+1))) - 8*(
mu(i,j-1,k)*(u(2,i,j-1,k-2)-u(2,i,j-1,k+2)+
8*(-u(2,i,j-1,k-1)+u(2,i,j-1,k+1))) )+8*(
mu(i,j+1,k)*(u(2,i,j+1,k-2)-u(2,i,j+1,k+2)+
8*(-u(2,i,j+1,k-1)+u(2,i,j+1,k+1))) ) - (
mu(i,j+2,k)*(u(2,i,j+2,k-2)-u(2,i,j+2,k+2)+
8*(-u(2,i,j+2,k-1)+u(2,i,j+2,k+1))) ))
/* (la*u_x)_z */
+ strx(i)*strz(k)*
i144*( la(i,j,k-2)*(u(1,i-2,j,k-2)-u(1,i+2,j,k-2)+
8*(-u(1,i-1,j,k-2)+u(1,i+1,j,k-2))) - 8*(
la(i,j,k-1)*(u(1,i-2,j,k-1)-u(1,i+2,j,k-1)+
8*(-u(1,i-1,j,k-1)+u(1,i+1,j,k-1))) )+8*(
la(i,j,k+1)*(u(1,i-2,j,k+1)-u(1,i+2,j,k+1)+
8*(-u(1,i-1,j,k+1)+u(1,i+1,j,k+1))) ) - (
la(i,j,k+2)*(u(1,i-2,j,k+2)-u(1,i+2,j,k+2)+
8*(-u(1,i-1,j,k+2)+u(1,i+1,j,k+2))) ))
/* (la*v_y)_z */
+ stry(j)*strz(k)*
i144*( la(i,j,k-2)*(u(2,i,j-2,k-2)-u(2,i,j+2,k-2)+
8*(-u(2,i,j-1,k-2)+u(2,i,j+1,k-2))) - 8*(
la(i,j,k-1)*(u(2,i,j-2,k-1)-u(2,i,j+2,k-1)+
8*(-u(2,i,j-1,k-1)+u(2,i,j+1,k-1))) )+8*(
la(i,j,k+1)*(u(2,i,j-2,k+1)-u(2,i,j+2,k+1)+
8*(-u(2,i,j-1,k+1)+u(2,i,j+1,k+1))) ) - (
la(i,j,k+2)*(u(2,i,j-2,k+2)-u(2,i,j+2,k+2)+
8*(-u(2,i,j-1,k+2)+u(2,i,j+1,k+2))) )) ;
/* 9 ops */
// lu(1,i,j,k) = a1*lu(1,i,j,k) + cof*r1;
// lu(2,i,j,k) = a1*lu(2,i,j,k) + cof*r2;
// lu(3,i,j,k) = a1*lu(3,i,j,k) + cof*r3;
// lu(1,i,j,k) = cof*r1;
// lu(2,i,j,k) = cof*r2;
lu(3,i,j,k) = cof*lu(3,i,j,k);
}
if( onesided[4]==1 )
{
#pragma omp for
for( k=1 ; k<= 6 ; k++ )
/* the centered stencil can be used in the x- and y-directions */
for( j=jfirst+2; j<=jlast-2; j++ )
#pragma simd
#pragma ivdep
for( i=ifirst+2; i<=ilast-2; i++ )
{
/* from inner_loop_4a */
mux1 = mu(i-1,j,k)*strx(i-1)-
tf*(mu(i,j,k)*strx(i)+mu(i-2,j,k)*strx(i-2));
mux2 = mu(i-2,j,k)*strx(i-2)+mu(i+1,j,k)*strx(i+1)+
3*(mu(i,j,k)*strx(i)+mu(i-1,j,k)*strx(i-1));
mux3 = mu(i-1,j,k)*strx(i-1)+mu(i+2,j,k)*strx(i+2)+
3*(mu(i+1,j,k)*strx(i+1)+mu(i,j,k)*strx(i));
mux4 = mu(i+1,j,k)*strx(i+1)-
tf*(mu(i,j,k)*strx(i)+mu(i+2,j,k)*strx(i+2));
muy1 = mu(i,j-1,k)*stry(j-1)-
tf*(mu(i,j,k)*stry(j)+mu(i,j-2,k)*stry(j-2));
muy2 = mu(i,j-2,k)*stry(j-2)+mu(i,j+1,k)*stry(j+1)+
3*(mu(i,j,k)*stry(j)+mu(i,j-1,k)*stry(j-1));
muy3 = mu(i,j-1,k)*stry(j-1)+mu(i,j+2,k)*stry(j+2)+
3*(mu(i,j+1,k)*stry(j+1)+mu(i,j,k)*stry(j));
muy4 = mu(i,j+1,k)*stry(j+1)-
tf*(mu(i,j,k)*stry(j)+mu(i,j+2,k)*stry(j+2));
r1 = i6*(strx(i)*((2*mux1+la(i-1,j,k)*strx(i-1)-
tf*(la(i,j,k)*strx(i)+la(i-2,j,k)*strx(i-2)))*
(u(1,i-2,j,k)-u(1,i,j,k))+
(2*mux2+la(i-2,j,k)*strx(i-2)+la(i+1,j,k)*strx(i+1)+
3*(la(i,j,k)*strx(i)+la(i-1,j,k)*strx(i-1)))*
(u(1,i-1,j,k)-u(1,i,j,k))+
(2*mux3+la(i-1,j,k)*strx(i-1)+la(i+2,j,k)*strx(i+2)+
3*(la(i+1,j,k)*strx(i+1)+la(i,j,k)*strx(i)))*
(u(1,i+1,j,k)-u(1,i,j,k))+
(2*mux4+ la(i+1,j,k)*strx(i+1)-
tf*(la(i,j,k)*strx(i)+la(i+2,j,k)*strx(i+2)))*
(u(1,i+2,j,k)-u(1,i,j,k)) ) + stry(j)*(
+ muy1*(u(1,i,j-2,k)-u(1,i,j,k)) +
muy2*(u(1,i,j-1,k)-u(1,i,j,k)) +
muy3*(u(1,i,j+1,k)-u(1,i,j,k)) +
muy4*(u(1,i,j+2,k)-u(1,i,j,k)) ) );
/* (mu*uz)_z can not be centered */
/* second derivative (mu*u_z)_z at grid point z_k */
/* averaging the coefficient, */
/* leave out the z-supergrid stretching strz, since it will */
/* never be used together with the sbp-boundary operator */
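 /* i.e. (mu*u_z)_z(k) ~ sum_{q=1..8} [ sum_{m=1..8} acof(k,q,m)*mu(i,j,m) ] * u(i,j,q), */
 /* plus the ghost-point term ghcof(k)*mu(i,j,1)*u(i,j,0) added below                    */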
mu1zz = 0;
mu2zz = 0;
mu3zz = 0;
for( q=1; q <= 8; q ++ )
{
// lap2mu= 0;
// mucof = 0;
// for( m=1 ; m<=8; m++ )
// {
// mucof += acof(k,q,m)*mu(i,j,m);
// lap2mu += acof(k,q,m)*(la(i,j,m)+2*mu(i,j,m));
// }
lap2mu = acof(k,q,1)*(la(i,j,1)+2*mu(i,j,1))+acof(k,q,2)*(la(i,j,2)+2*mu(i,j,2))+
acof(k,q,3)*(la(i,j,3)+2*mu(i,j,3))+acof(k,q,4)*(la(i,j,4)+2*mu(i,j,4))+
acof(k,q,5)*(la(i,j,5)+2*mu(i,j,5))+acof(k,q,6)*(la(i,j,6)+2*mu(i,j,6))+
acof(k,q,7)*(la(i,j,7)+2*mu(i,j,7))+acof(k,q,8)*(la(i,j,8)+2*mu(i,j,8));
mucof = acof(k,q,1)*mu(i,j,1)+acof(k,q,2)*mu(i,j,2)+acof(k,q,3)*mu(i,j,3)+acof(k,q,4)*mu(i,j,4)+
acof(k,q,5)*mu(i,j,5)+acof(k,q,6)*mu(i,j,6)+acof(k,q,7)*mu(i,j,7)+acof(k,q,8)*mu(i,j,8);
mu1zz += mucof*u(1,i,j,q);
mu2zz += mucof*u(2,i,j,q);
mu3zz += lap2mu*u(3,i,j,q);
}
/* ghost point only influences the first point (k=1) because ghcof(k)=0 for k>=2*/
r1 = r1 + (mu1zz + ghcof(k)*mu(i,j,1)*u(1,i,j,0));
r2 = i6*(strx(i)*(mux1*(u(2,i-2,j,k)-u(2,i,j,k)) +
mux2*(u(2,i-1,j,k)-u(2,i,j,k)) +
mux3*(u(2,i+1,j,k)-u(2,i,j,k)) +
mux4*(u(2,i+2,j,k)-u(2,i,j,k)) )+ stry(j)*(
(2*muy1+la(i,j-1,k)*stry(j-1)-
tf*(la(i,j,k)*stry(j)+la(i,j-2,k)*stry(j-2)))*
(u(2,i,j-2,k)-u(2,i,j,k))+
(2*muy2+la(i,j-2,k)*stry(j-2)+la(i,j+1,k)*stry(j+1)+
3*(la(i,j,k)*stry(j)+la(i,j-1,k)*stry(j-1)))*
(u(2,i,j-1,k)-u(2,i,j,k))+
(2*muy3+la(i,j-1,k)*stry(j-1)+la(i,j+2,k)*stry(j+2)+
3*(la(i,j+1,k)*stry(j+1)+la(i,j,k)*stry(j)))*
(u(2,i,j+1,k)-u(2,i,j,k))+
(2*muy4+la(i,j+1,k)*stry(j+1)-
tf*(la(i,j,k)*stry(j)+la(i,j+2,k)*stry(j+2)))*
(u(2,i,j+2,k)-u(2,i,j,k)) ) );
/* ghost point only influences the first point (k=1) because ghcof(k)=0 for k>=2 */
r2 = r2 + (mu2zz + ghcof(k)*mu(i,j,1)*u(2,i,j,0));
r3 = i6*(strx(i)*(mux1*(u(3,i-2,j,k)-u(3,i,j,k)) +
mux2*(u(3,i-1,j,k)-u(3,i,j,k)) +
mux3*(u(3,i+1,j,k)-u(3,i,j,k)) +
mux4*(u(3,i+2,j,k)-u(3,i,j,k)) ) + stry(j)*(
muy1*(u(3,i,j-2,k)-u(3,i,j,k)) +
muy2*(u(3,i,j-1,k)-u(3,i,j,k)) +
muy3*(u(3,i,j+1,k)-u(3,i,j,k)) +
muy4*(u(3,i,j+2,k)-u(3,i,j,k)) ) );
/* ghost point only influences the first point (k=1) because ghcof(k)=0 for k>=2 */
r3 = r3 + (mu3zz + ghcof(k)*(la(i,j,1)+2*mu(i,j,1))*
u(3,i,j,0));
/* cross-terms in first component of rhs */
/* (la*v_y)_x */
r1 = r1 + strx(i)*stry(j)*(
i144*( la(i-2,j,k)*(u(2,i-2,j-2,k)-u(2,i-2,j+2,k)+
8*(-u(2,i-2,j-1,k)+u(2,i-2,j+1,k))) - 8*(
la(i-1,j,k)*(u(2,i-1,j-2,k)-u(2,i-1,j+2,k)+
8*(-u(2,i-1,j-1,k)+u(2,i-1,j+1,k))) )+8*(
la(i+1,j,k)*(u(2,i+1,j-2,k)-u(2,i+1,j+2,k)+
8*(-u(2,i+1,j-1,k)+u(2,i+1,j+1,k))) ) - (
la(i+2,j,k)*(u(2,i+2,j-2,k)-u(2,i+2,j+2,k)+
8*(-u(2,i+2,j-1,k)+u(2,i+2,j+1,k))) ))
/* (mu*v_x)_y */
+ i144*( mu(i,j-2,k)*(u(2,i-2,j-2,k)-u(2,i+2,j-2,k)+
8*(-u(2,i-1,j-2,k)+u(2,i+1,j-2,k))) - 8*(
mu(i,j-1,k)*(u(2,i-2,j-1,k)-u(2,i+2,j-1,k)+
8*(-u(2,i-1,j-1,k)+u(2,i+1,j-1,k))) )+8*(
mu(i,j+1,k)*(u(2,i-2,j+1,k)-u(2,i+2,j+1,k)+
8*(-u(2,i-1,j+1,k)+u(2,i+1,j+1,k))) ) - (
mu(i,j+2,k)*(u(2,i-2,j+2,k)-u(2,i+2,j+2,k)+
8*(-u(2,i-1,j+2,k)+u(2,i+1,j+2,k))) )) );
/* (la*w_z)_x: NOT CENTERED */
u3zip2=0;
u3zip1=0;
u3zim1=0;
u3zim2=0;
for( q=1 ; q <=8 ; q++ )
{
u3zip2 += bope(k,q)*u(3,i+2,j,q);
u3zip1 += bope(k,q)*u(3,i+1,j,q);
u3zim1 += bope(k,q)*u(3,i-1,j,q);
u3zim2 += bope(k,q)*u(3,i-2,j,q);
}
lau3zx= i12*(-la(i+2,j,k)*u3zip2 + 8*la(i+1,j,k)*u3zip1
-8*la(i-1,j,k)*u3zim1 + la(i-2,j,k)*u3zim2);
r1 = r1 + strx(i)*lau3zx;
/* (mu*w_x)_z: NOT CENTERED */
mu3xz=0;
for( q=1 ; q<=8 ; q++ )
mu3xz += bope(k,q)*( mu(i,j,q)*i12*
(-u(3,i+2,j,q) + 8*u(3,i+1,j,q)
-8*u(3,i-1,j,q) + u(3,i-2,j,q)) );
r1 = r1 + strx(i)*mu3xz;
/* cross-terms in second component of rhs */
/* (mu*u_y)_x */
r2 = r2 + strx(i)*stry(j)*(
i144*( mu(i-2,j,k)*(u(1,i-2,j-2,k)-u(1,i-2,j+2,k)+
8*(-u(1,i-2,j-1,k)+u(1,i-2,j+1,k))) - 8*(
mu(i-1,j,k)*(u(1,i-1,j-2,k)-u(1,i-1,j+2,k)+
8*(-u(1,i-1,j-1,k)+u(1,i-1,j+1,k))) )+8*(
mu(i+1,j,k)*(u(1,i+1,j-2,k)-u(1,i+1,j+2,k)+
8*(-u(1,i+1,j-1,k)+u(1,i+1,j+1,k))) ) - (
mu(i+2,j,k)*(u(1,i+2,j-2,k)-u(1,i+2,j+2,k)+
8*(-u(1,i+2,j-1,k)+u(1,i+2,j+1,k))) ))
/* (la*u_x)_y */
+ i144*( la(i,j-2,k)*(u(1,i-2,j-2,k)-u(1,i+2,j-2,k)+
8*(-u(1,i-1,j-2,k)+u(1,i+1,j-2,k))) - 8*(
la(i,j-1,k)*(u(1,i-2,j-1,k)-u(1,i+2,j-1,k)+
8*(-u(1,i-1,j-1,k)+u(1,i+1,j-1,k))) )+8*(
la(i,j+1,k)*(u(1,i-2,j+1,k)-u(1,i+2,j+1,k)+
8*(-u(1,i-1,j+1,k)+u(1,i+1,j+1,k))) ) - (
la(i,j+2,k)*(u(1,i-2,j+2,k)-u(1,i+2,j+2,k)+
8*(-u(1,i-1,j+2,k)+u(1,i+1,j+2,k))) )) );
/* (la*w_z)_y : NOT CENTERED */
u3zjp2=0;
u3zjp1=0;
u3zjm1=0;
u3zjm2=0;
for( q=1 ; q <=8 ; q++ )
{
u3zjp2 += bope(k,q)*u(3,i,j+2,q);
u3zjp1 += bope(k,q)*u(3,i,j+1,q);
u3zjm1 += bope(k,q)*u(3,i,j-1,q);
u3zjm2 += bope(k,q)*u(3,i,j-2,q);
}
lau3zy= i12*(-la(i,j+2,k)*u3zjp2 + 8*la(i,j+1,k)*u3zjp1
-8*la(i,j-1,k)*u3zjm1 + la(i,j-2,k)*u3zjm2);
r2 = r2 + stry(j)*lau3zy;
/* (mu*w_y)_z: NOT CENTERED */
mu3yz=0;
for( q=1 ; q <=8 ; q++ )
mu3yz += bope(k,q)*( mu(i,j,q)*i12*
(-u(3,i,j+2,q) + 8*u(3,i,j+1,q)
-8*u(3,i,j-1,q) + u(3,i,j-2,q)) );
r2 = r2 + stry(j)*mu3yz;
/* No centered cross terms in r3 */
/* (mu*u_z)_x: NOT CENTERED */
u1zip2=0;
u1zip1=0;
u1zim1=0;
u1zim2=0;
for( q=1 ; q <=8 ; q++ )
{
u1zip2 += bope(k,q)*u(1,i+2,j,q);
u1zip1 += bope(k,q)*u(1,i+1,j,q);
u1zim1 += bope(k,q)*u(1,i-1,j,q);
u1zim2 += bope(k,q)*u(1,i-2,j,q);
}
mu1zx= i12*(-mu(i+2,j,k)*u1zip2 + 8*mu(i+1,j,k)*u1zip1
-8*mu(i-1,j,k)*u1zim1 + mu(i-2,j,k)*u1zim2);
r3 = r3 + strx(i)*mu1zx;
/* (mu*v_z)_y: NOT CENTERED */
u2zjp2=0;
u2zjp1=0;
u2zjm1=0;
u2zjm2=0;
for( q=1 ; q <=8 ; q++ )
{
u2zjp2 += bope(k,q)*u(2,i,j+2,q);
u2zjp1 += bope(k,q)*u(2,i,j+1,q);
u2zjm1 += bope(k,q)*u(2,i,j-1,q);
u2zjm2 += bope(k,q)*u(2,i,j-2,q);
}
mu2zy= i12*(-mu(i,j+2,k)*u2zjp2 + 8*mu(i,j+1,k)*u2zjp1
-8*mu(i,j-1,k)*u2zjm1 + mu(i,j-2,k)*u2zjm2);
r3 = r3 + stry(j)*mu2zy;
/* (la*u_x)_z: NOT CENTERED */
lau1xz=0;
for( q=1 ; q <=8 ; q++ )
lau1xz += bope(k,q)*( la(i,j,q)*i12*
(-u(1,i+2,j,q) + 8*u(1,i+1,j,q)
-8*u(1,i-1,j,q) + u(1,i-2,j,q)) );
r3 = r3 + strx(i)*lau1xz;
/* (la*v_y)_z: NOT CENTERED */
lau2yz=0;
for( q=1 ; q <=8 ; q++ )
lau2yz += bope(k,q)*( la(i,j,q)*i12*
(-u(2,i,j+2,q) + 8*u(2,i,j+1,q)
-8*u(2,i,j-1,q) + u(2,i,j-2,q)) );
r3 = r3 + stry(j)*lau2yz;
lu(1,i,j,k) = a1*lu(1,i,j,k) + cof*r1;
lu(2,i,j,k) = a1*lu(2,i,j,k) + cof*r2;
lu(3,i,j,k) = a1*lu(3,i,j,k) + cof*r3;
}
}
if( onesided[5] == 1 )
{
#pragma omp for
for( k = nk-5 ; k <= nk ; k++ )
for( j=jfirst+2; j<=jlast-2; j++ )
#pragma simd
#pragma ivdep
for( i=ifirst+2; i<=ilast-2; i++ )
{
/* from inner_loop_4a */
mux1 = mu(i-1,j,k)*strx(i-1)-
tf*(mu(i,j,k)*strx(i)+mu(i-2,j,k)*strx(i-2));
mux2 = mu(i-2,j,k)*strx(i-2)+mu(i+1,j,k)*strx(i+1)+
3*(mu(i,j,k)*strx(i)+mu(i-1,j,k)*strx(i-1));
mux3 = mu(i-1,j,k)*strx(i-1)+mu(i+2,j,k)*strx(i+2)+
3*(mu(i+1,j,k)*strx(i+1)+mu(i,j,k)*strx(i));
mux4 = mu(i+1,j,k)*strx(i+1)-
tf*(mu(i,j,k)*strx(i)+mu(i+2,j,k)*strx(i+2));
muy1 = mu(i,j-1,k)*stry(j-1)-
tf*(mu(i,j,k)*stry(j)+mu(i,j-2,k)*stry(j-2));
muy2 = mu(i,j-2,k)*stry(j-2)+mu(i,j+1,k)*stry(j+1)+
3*(mu(i,j,k)*stry(j)+mu(i,j-1,k)*stry(j-1));
muy3 = mu(i,j-1,k)*stry(j-1)+mu(i,j+2,k)*stry(j+2)+
3*(mu(i,j+1,k)*stry(j+1)+mu(i,j,k)*stry(j));
muy4 = mu(i,j+1,k)*stry(j+1)-
tf*(mu(i,j,k)*stry(j)+mu(i,j+2,k)*stry(j+2));
/* xx, yy, and zz derivatives: */
/* note that we could have introduced intermediate variables for the average of lambda */
/* in the same way as we did for mu */
r1 = i6*(strx(i)*((2*mux1+la(i-1,j,k)*strx(i-1)-
tf*(la(i,j,k)*strx(i)+la(i-2,j,k)*strx(i-2)))*
(u(1,i-2,j,k)-u(1,i,j,k))+
(2*mux2+la(i-2,j,k)*strx(i-2)+la(i+1,j,k)*strx(i+1)+
3*(la(i,j,k)*strx(i)+la(i-1,j,k)*strx(i-1)))*
(u(1,i-1,j,k)-u(1,i,j,k))+
(2*mux3+la(i-1,j,k)*strx(i-1)+la(i+2,j,k)*strx(i+2)+
3*(la(i+1,j,k)*strx(i+1)+la(i,j,k)*strx(i)))*
(u(1,i+1,j,k)-u(1,i,j,k))+
(2*mux4+ la(i+1,j,k)*strx(i+1)-
tf*(la(i,j,k)*strx(i)+la(i+2,j,k)*strx(i+2)))*
(u(1,i+2,j,k)-u(1,i,j,k)) ) + stry(j)*(
+ muy1*(u(1,i,j-2,k)-u(1,i,j,k)) +
muy2*(u(1,i,j-1,k)-u(1,i,j,k)) +
muy3*(u(1,i,j+1,k)-u(1,i,j,k)) +
muy4*(u(1,i,j+2,k)-u(1,i,j,k)) ) );
/* all indices ending with 'b' are indices relative to the boundary, going into the domain (1,2,3,...)*/
kb = nk-k+1;
/* all coefficient arrays (acof, bope, ghcof) should be indexed with these indices */
/* all solution and material property arrays should be indexed with (i,j,k) */
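 /* e.g. k = nk maps to kb = 1 (the boundary plane itself) and k = nk-5 to kb = 6 */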
/* (mu*uz)_z can not be centered */
/* second derivative (mu*u_z)_z at grid point z_k */
/* averaging the coefficient */
mu1zz = 0;
mu2zz = 0;
mu3zz = 0;
for( qb=1; qb <= 8 ; qb++ )
{
mucof = 0;
lap2mu = 0;
for( mb=1; mb <= 8; mb++ )
{
mucof += acof(kb,qb,mb)*mu(i,j,nk-mb+1);
lap2mu += acof(kb,qb,mb)*(2*mu(i,j,nk-mb+1)+la(i,j,nk-mb+1));
}
mu1zz += mucof*u(1,i,j,nk-qb+1);
mu2zz += mucof*u(2,i,j,nk-qb+1);
mu3zz += lap2mu*u(3,i,j,nk-qb+1);
}
/* computing the second derivative */
/* ghost point only influences the first point (k=1) because ghcof(k)=0 for k>=2*/
r1 = r1 + (mu1zz + ghcof(kb)*mu(i,j,nk)*u(1,i,j,nk+1));
r2 = i6*(strx(i)*(mux1*(u(2,i-2,j,k)-u(2,i,j,k)) +
mux2*(u(2,i-1,j,k)-u(2,i,j,k)) +
mux3*(u(2,i+1,j,k)-u(2,i,j,k)) +
mux4*(u(2,i+2,j,k)-u(2,i,j,k)) )+ stry(j)*(
(2*muy1+la(i,j-1,k)*stry(j-1)-
tf*(la(i,j,k)*stry(j)+la(i,j-2,k)*stry(j-2)))*
(u(2,i,j-2,k)-u(2,i,j,k))+
(2*muy2+la(i,j-2,k)*stry(j-2)+la(i,j+1,k)*stry(j+1)+
3*(la(i,j,k)*stry(j)+la(i,j-1,k)*stry(j-1)))*
(u(2,i,j-1,k)-u(2,i,j,k))+
(2*muy3+la(i,j-1,k)*stry(j-1)+la(i,j+2,k)*stry(j+2)+
3*(la(i,j+1,k)*stry(j+1)+la(i,j,k)*stry(j)))*
(u(2,i,j+1,k)-u(2,i,j,k))+
(2*muy4+la(i,j+1,k)*stry(j+1)-
tf*(la(i,j,k)*stry(j)+la(i,j+2,k)*stry(j+2)))*
(u(2,i,j+2,k)-u(2,i,j,k)) ) );
/* (mu*vz)_z can not be centered */
/* second derivative (mu*v_z)_z at grid point z_k */
/* averaging the coefficient: already done above */
r2 = r2 + (mu2zz + ghcof(kb)*mu(i,j,nk)*u(2,i,j,nk+1));
r3 = i6*(strx(i)*(mux1*(u(3,i-2,j,k)-u(3,i,j,k)) +
mux2*(u(3,i-1,j,k)-u(3,i,j,k)) +
mux3*(u(3,i+1,j,k)-u(3,i,j,k)) +
mux4*(u(3,i+2,j,k)-u(3,i,j,k)) ) + stry(j)*(
muy1*(u(3,i,j-2,k)-u(3,i,j,k)) +
muy2*(u(3,i,j-1,k)-u(3,i,j,k)) +
muy3*(u(3,i,j+1,k)-u(3,i,j,k)) +
muy4*(u(3,i,j+2,k)-u(3,i,j,k)) ) );
r3 = r3 + (mu3zz + ghcof(kb)*(la(i,j,nk)+2*mu(i,j,nk))*
u(3,i,j,nk+1));
/* cross-terms in first component of rhs */
/* (la*v_y)_x */
r1 = r1 + strx(i)*stry(j)*(
i144*( la(i-2,j,k)*(u(2,i-2,j-2,k)-u(2,i-2,j+2,k)+
8*(-u(2,i-2,j-1,k)+u(2,i-2,j+1,k))) - 8*(
la(i-1,j,k)*(u(2,i-1,j-2,k)-u(2,i-1,j+2,k)+
8*(-u(2,i-1,j-1,k)+u(2,i-1,j+1,k))) )+8*(
la(i+1,j,k)*(u(2,i+1,j-2,k)-u(2,i+1,j+2,k)+
8*(-u(2,i+1,j-1,k)+u(2,i+1,j+1,k))) ) - (
la(i+2,j,k)*(u(2,i+2,j-2,k)-u(2,i+2,j+2,k)+
8*(-u(2,i+2,j-1,k)+u(2,i+2,j+1,k))) ))
/* (mu*v_x)_y */
+ i144*( mu(i,j-2,k)*(u(2,i-2,j-2,k)-u(2,i+2,j-2,k)+
8*(-u(2,i-1,j-2,k)+u(2,i+1,j-2,k))) - 8*(
mu(i,j-1,k)*(u(2,i-2,j-1,k)-u(2,i+2,j-1,k)+
8*(-u(2,i-1,j-1,k)+u(2,i+1,j-1,k))) )+8*(
mu(i,j+1,k)*(u(2,i-2,j+1,k)-u(2,i+2,j+1,k)+
8*(-u(2,i-1,j+1,k)+u(2,i+1,j+1,k))) ) - (
mu(i,j+2,k)*(u(2,i-2,j+2,k)-u(2,i+2,j+2,k)+
8*(-u(2,i-1,j+2,k)+u(2,i+1,j+2,k))) )) );
/* (la*w_z)_x: NOT CENTERED */
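 /* the one-sided z-difference changes sign under the k -> nk-k+1 mirroring, */
 /* hence the -= accumulation below (compare the += loops in the             */
 /* onesided[4] block above)                                                 */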
u3zip2=0;
u3zip1=0;
u3zim1=0;
u3zim2=0;
for( qb=1; qb <= 8 ; qb++ )
{
u3zip2 -= bope(kb,qb)*u(3,i+2,j,nk-qb+1);
u3zip1 -= bope(kb,qb)*u(3,i+1,j,nk-qb+1);
u3zim1 -= bope(kb,qb)*u(3,i-1,j,nk-qb+1);
u3zim2 -= bope(kb,qb)*u(3,i-2,j,nk-qb+1);
}
lau3zx= i12*(-la(i+2,j,k)*u3zip2 + 8*la(i+1,j,k)*u3zip1
-8*la(i-1,j,k)*u3zim1 + la(i-2,j,k)*u3zim2);
r1 = r1 + strx(i)*lau3zx;
/* (mu*w_x)_z: NOT CENTERED */
mu3xz=0;
for( qb=1; qb <= 8 ; qb++ )
mu3xz -= bope(kb,qb)*( mu(i,j,nk-qb+1)*i12*
(-u(3,i+2,j,nk-qb+1) + 8*u(3,i+1,j,nk-qb+1)
-8*u(3,i-1,j,nk-qb+1) + u(3,i-2,j,nk-qb+1)) );
r1 = r1 + strx(i)*mu3xz;
/* cross-terms in second component of rhs */
/* (mu*u_y)_x */
r2 = r2 + strx(i)*stry(j)*(
i144*( mu(i-2,j,k)*(u(1,i-2,j-2,k)-u(1,i-2,j+2,k)+
8*(-u(1,i-2,j-1,k)+u(1,i-2,j+1,k))) - 8*(
mu(i-1,j,k)*(u(1,i-1,j-2,k)-u(1,i-1,j+2,k)+
8*(-u(1,i-1,j-1,k)+u(1,i-1,j+1,k))) )+8*(
mu(i+1,j,k)*(u(1,i+1,j-2,k)-u(1,i+1,j+2,k)+
8*(-u(1,i+1,j-1,k)+u(1,i+1,j+1,k))) ) - (
mu(i+2,j,k)*(u(1,i+2,j-2,k)-u(1,i+2,j+2,k)+
8*(-u(1,i+2,j-1,k)+u(1,i+2,j+1,k))) ))
/* (la*u_x)_y */
+ i144*( la(i,j-2,k)*(u(1,i-2,j-2,k)-u(1,i+2,j-2,k)+
8*(-u(1,i-1,j-2,k)+u(1,i+1,j-2,k))) - 8*(
la(i,j-1,k)*(u(1,i-2,j-1,k)-u(1,i+2,j-1,k)+
8*(-u(1,i-1,j-1,k)+u(1,i+1,j-1,k))) )+8*(
la(i,j+1,k)*(u(1,i-2,j+1,k)-u(1,i+2,j+1,k)+
8*(-u(1,i-1,j+1,k)+u(1,i+1,j+1,k))) ) - (
la(i,j+2,k)*(u(1,i-2,j+2,k)-u(1,i+2,j+2,k)+
8*(-u(1,i-1,j+2,k)+u(1,i+1,j+2,k))) )) );
/* (la*w_z)_y : NOT CENTERED */
u3zjp2=0;
u3zjp1=0;
u3zjm1=0;
u3zjm2=0;
for( qb=1; qb <= 8 ; qb++ )
{
u3zjp2 -= bope(kb,qb)*u(3,i,j+2,nk-qb+1);
u3zjp1 -= bope(kb,qb)*u(3,i,j+1,nk-qb+1);
u3zjm1 -= bope(kb,qb)*u(3,i,j-1,nk-qb+1);
u3zjm2 -= bope(kb,qb)*u(3,i,j-2,nk-qb+1);
}
lau3zy= i12*(-la(i,j+2,k)*u3zjp2 + 8*la(i,j+1,k)*u3zjp1
-8*la(i,j-1,k)*u3zjm1 + la(i,j-2,k)*u3zjm2);
r2 = r2 + stry(j)*lau3zy;
/* (mu*w_y)_z: NOT CENTERED */
mu3yz=0;
for( qb=1; qb <= 8 ; qb++ )
mu3yz -= bope(kb,qb)*( mu(i,j,nk-qb+1)*i12*
(-u(3,i,j+2,nk-qb+1) + 8*u(3,i,j+1,nk-qb+1)
-8*u(3,i,j-1,nk-qb+1) + u(3,i,j-2,nk-qb+1)) );
r2 = r2 + stry(j)*mu3yz;
/* No centered cross terms in r3 */
/* (mu*u_z)_x: NOT CENTERED */
u1zip2=0;
u1zip1=0;
u1zim1=0;
u1zim2=0;
for( qb=1; qb <= 8 ; qb++ )
{
u1zip2 -= bope(kb,qb)*u(1,i+2,j,nk-qb+1);
u1zip1 -= bope(kb,qb)*u(1,i+1,j,nk-qb+1);
u1zim1 -= bope(kb,qb)*u(1,i-1,j,nk-qb+1);
u1zim2 -= bope(kb,qb)*u(1,i-2,j,nk-qb+1);
}
mu1zx= i12*(-mu(i+2,j,k)*u1zip2 + 8*mu(i+1,j,k)*u1zip1
-8*mu(i-1,j,k)*u1zim1 + mu(i-2,j,k)*u1zim2);
r3 = r3 + strx(i)*mu1zx;
/* (mu*v_z)_y: NOT CENTERED */
u2zjp2=0;
u2zjp1=0;
u2zjm1=0;
u2zjm2=0;
for( qb=1; qb <= 8 ; qb++ )
{
u2zjp2 -= bope(kb,qb)*u(2,i,j+2,nk-qb+1);
u2zjp1 -= bope(kb,qb)*u(2,i,j+1,nk-qb+1);
u2zjm1 -= bope(kb,qb)*u(2,i,j-1,nk-qb+1);
u2zjm2 -= bope(kb,qb)*u(2,i,j-2,nk-qb+1);
}
mu2zy= i12*(-mu(i,j+2,k)*u2zjp2 + 8*mu(i,j+1,k)*u2zjp1
-8*mu(i,j-1,k)*u2zjm1 + mu(i,j-2,k)*u2zjm2);
r3 = r3 + stry(j)*mu2zy;
/* (la*u_x)_z: NOT CENTERED */
lau1xz=0;
for( qb=1; qb <= 8 ; qb++ )
lau1xz -= bope(kb,qb)*( la(i,j,nk-qb+1)*i12*
(-u(1,i+2,j,nk-qb+1) + 8*u(1,i+1,j,nk-qb+1)
-8*u(1,i-1,j,nk-qb+1) + u(1,i-2,j,nk-qb+1)) );
r3 = r3 + strx(i)*lau1xz;
/* (la*v_y)_z: NOT CENTERED */
lau2yz=0;
for( qb=1; qb <= 8 ; qb++ )
{
lau2yz -= bope(kb,qb)*( la(i,j,nk-qb+1)*i12*
(-u(2,i,j+2,nk-qb+1) + 8*u(2,i,j+1,nk-qb+1)
-8*u(2,i,j-1,nk-qb+1) + u(2,i,j-2,nk-qb+1)) );
}
r3 = r3 + stry(j)*lau2yz;
lu(1,i,j,k) = a1*lu(1,i,j,k) + cof*r1;
lu(2,i,j,k) = a1*lu(2,i,j,k) + cof*r2;
lu(3,i,j,k) = a1*lu(3,i,j,k) + cof*r3;
}
}
}
#undef mu
#undef la
#undef u
#undef lu
#undef strx
#undef stry
#undef strz
}
|
GraphIO.h | #ifndef __GRAPH_IO_H__
#define __GRAPH_IO_H__
#include <algorithm>
#include <cassert>
#include <cctype>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>
#include <unordered_map>
#include "Meta.h"
#include "TimeMeasurer.h"
#if defined(OPENMP)
#include <omp.h>
#endif
enum GraphFormat { SNAP, BIN, SNAP_LVID, ADJ };
class GraphIO {
public:
static void ReadDataFile(std::string& filename, bool directed,
size_t& vertex_count, size_t& edge_count,
uintE*& row_ptrs, uintV*& cols,
GraphFormat graph_format) {
switch (graph_format) {
case SNAP:
ReadSNAPFile(filename, directed, vertex_count, edge_count, row_ptrs,
cols);
break;
case BIN:
ReadCSRBinFile(filename, directed, vertex_count, edge_count, row_ptrs,
cols);
break;
case SNAP_LVID:
ReadSNAPLargeVIDFile(filename, directed, vertex_count, edge_count,
row_ptrs, cols);
break;
default:
assert(false);
}
}
static void ReadDataFile(std::string& filename, bool directed,
size_t& vertex_count, size_t& edge_count,
uintE*& row_ptrs, uintV*& cols) {
std::string suffix = filename.substr(filename.rfind(".") + 1);
GraphFormat graph_format;
if (suffix == "txt") {
graph_format = SNAP;
} else if (suffix == "bin") {
graph_format = BIN;
} else if (suffix == "lvid") {
graph_format = SNAP_LVID;
} else {
assert(false);
}
ReadDataFile(filename, directed, vertex_count, edge_count, row_ptrs, cols,
graph_format);
}
static void ReadPartitionFile(std::string& filename, size_t partition_num,
size_t vertex_count,
uintP* vertex_partition_map) {
if (partition_num == 1 || filename.length() == 0) {
partition_num = 1;
for (uintV u = 0; u < vertex_count; u++) {
vertex_partition_map[u] = 0;
}
} else {
std::ifstream file(filename.c_str(), std::fstream::in);
size_t pid;
size_t max_pid = 0;
for (uintV u = 0; u < vertex_count; ++u) {
file >> pid;
max_pid = std::max(max_pid, pid);
vertex_partition_map[u] = pid;
}
file.close();
assert(max_pid + 1 == partition_num);
}
}
static void ReadCSRBinFile(std::string& filename, bool directed,
size_t& vertex_count, size_t& edge_count,
uintE*& row_ptrs, uintV*& cols) {
// CSR is a homemade format
// It removes parallel edges and self loops, but it may contain dangling
// nodes
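    // On-disk layout (as read below and written by WriteCSRBinFile):
    //   size_t sizeof(uintV), size_t sizeof(uintE),
    //   size_t vertex_count,  size_t edge_count,
    //   uintE row_ptrs[vertex_count+1], uintV cols[edge_count]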
vertex_count = 0;
edge_count = 0;
row_ptrs = NULL;
cols = NULL;
TimeMeasurer timer;
timer.StartTimer();
std::cout << "start read csr bin file....";
FILE* file_in = fopen(filename.c_str(), "rb");
assert(file_in != NULL);
fseek(file_in, 0, SEEK_SET);
size_t res = 0;
size_t uintV_size = 0, uintE_size = 0;
res += fread(&uintV_size, sizeof(size_t), 1, file_in);
res += fread(&uintE_size, sizeof(size_t), 1, file_in);
res += fread(&vertex_count, sizeof(size_t), 1, file_in);
res += fread(&edge_count, sizeof(size_t), 1, file_in);
std::cout << "uintV_size=" << uintV_size << ",uintE_size=" << uintE_size
<< std::endl;
std::cout << "vertex_count=" << vertex_count << ",edge_count=" << edge_count
<< std::endl;
assert(uintV_size == sizeof(uintV));
assert(uintE_size == sizeof(uintE));
row_ptrs = new uintE[vertex_count + 1];
cols = new uintV[edge_count];
for (uintV u = 0; u <= vertex_count; ++u) {
res += fread(&row_ptrs[u], sizeof(uintE), 1, file_in);
}
for (uintV u = 0; u < vertex_count; ++u) {
for (uintE j = row_ptrs[u]; j < row_ptrs[u + 1]; ++j) {
res += fread(&cols[j], sizeof(uintV), 1, file_in);
}
}
assert(res == (4 + (vertex_count + 1) + edge_count));
fgetc(file_in);
assert(feof(file_in));
fclose(file_in);
timer.EndTimer();
std::cout << "finish read csr bin file, elapsed_time="
<< timer.GetElapsedMicroSeconds() / 1000.0 << "ms" << std::endl;
}
// Some data graphs may contain large vertex ids, e.g., in web graphs,
// the vertex ids can be integers too large to store even in a long long int.
// So we need to use a map to sequentialize the vertex ids.
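  // e.g. an edge line "18446744073709551616 7" keeps both endpoints as strings;
  // each distinct string is assigned the next unused id 0, 1, 2, ... in
  // first-seen order.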
static void ReadSNAPLargeVIDFile(std::string& filename, bool directed,
size_t& vertex_count, size_t& edge_count,
uintE*& row_ptrs, uintV*& cols) {
vertex_count = 0;
edge_count = 0;
row_ptrs = NULL;
cols = NULL;
std::cout << "start build csr..." << std::endl;
      // const char* kDelimiters = " ,;\t";
      // note: despite the name, kDelimiters holds the *token* characters;
      // find_first_of below locates the start of the next digit run
      const char* kDelimiters = "0123456789";
std::unordered_map<std::string, uintV> ids;
std::vector<uintV> edge_pairs;
{
std::ifstream file(filename.c_str(), std::fstream::in);
std::string line;
while (getline(file, line)) {
if (line.length() == 0 || !std::isdigit(line[0])) continue;
std::vector<std::string> num_strs;
size_t cur_pos = 0;
while (cur_pos < line.length()) {
cur_pos =
line.find_first_of(kDelimiters, cur_pos, strlen(kDelimiters));
if (cur_pos < line.length()) {
size_t next_pos = line.find_first_not_of(kDelimiters, cur_pos,
strlen(kDelimiters));
num_strs.push_back(line.substr(cur_pos, next_pos - cur_pos));
assert(next_pos > cur_pos);
cur_pos = next_pos;
}
}
for (auto& str : num_strs) {
assert(str.length());
for (auto ch : str) {
assert(std::isdigit(ch));
}
}
for (auto& str : num_strs) {
if (ids.find(str) == ids.end()) {
ids.insert(std::make_pair(str, vertex_count++));
}
edge_pairs.push_back(ids[str]);
}
}
file.close();
}
ids.clear();
std::cout << "edge pairs size=" << edge_pairs.size() << std::endl;
assert(edge_pairs.size() % 2 == 0);
edge_count = edge_pairs.size() / 2;
if (!directed) {
edge_count *= 2;
}
std::vector<uintE> offsets(vertex_count + 1, 0);
for (size_t i = 0; i < edge_pairs.size(); i += 2) {
offsets[edge_pairs[i]]++;
if (!directed) {
offsets[edge_pairs[i + 1]]++;
}
}
row_ptrs = new uintE[vertex_count + 1];
cols = new uintV[edge_count];
uintE prefix = 0;
for (uintV i = 0; i <= vertex_count; ++i) {
row_ptrs[i] = prefix;
prefix += offsets[i];
offsets[i] = row_ptrs[i];
}
for (size_t i = 0; i < edge_pairs.size(); i += 2) {
cols[offsets[edge_pairs[i]]++] = edge_pairs[i + 1];
if (!directed) {
cols[offsets[edge_pairs[i + 1]]++] = edge_pairs[i];
}
}
offsets.clear();
edge_pairs.clear();
SortCSRArray(vertex_count, row_ptrs, cols);
std::cout << "finish building CSR" << std::endl;
}
static void ReadSNAPFile(std::string& filename, bool directed,
size_t& vertex_count, size_t& edge_count,
uintE*& row_ptrs, uintV*& cols) {
vertex_count = 0;
edge_count = 0;
row_ptrs = NULL;
cols = NULL;
std::cout << "start build csr..." << std::endl;
uintV min_vertex_id = kMaxuintV;
uintV max_vertex_id = kMinuintV;
{
std::ifstream file(filename.c_str(), std::fstream::in);
std::string line;
uintV vids[2];
while (getline(file, line)) {
if (line.length() == 0 || !std::isdigit(line[0])) continue;
std::istringstream iss(line);
for (int i = 0; i < 2; ++i) {
iss >> vids[i];
min_vertex_id = std::min(min_vertex_id, vids[i]);
max_vertex_id = std::max(max_vertex_id, vids[i]);
}
edge_count++;
}
file.close();
}
vertex_count = max_vertex_id - min_vertex_id + 1;
if (!directed) edge_count *= 2;
std::cout << "vertex_count=" << vertex_count << ",edge_count=" << edge_count
<< std::endl;
row_ptrs = new uintE[vertex_count + 1];
cols = new uintV[edge_count];
auto offsets = new uintE[vertex_count + 1];
memset(offsets, 0, sizeof(uintE) * (vertex_count + 1));
{
std::ifstream file(filename.c_str(), std::fstream::in);
std::string line;
uintV vids[2];
while (getline(file, line)) {
if (line.length() == 0 || !std::isdigit(line[0])) continue;
std::istringstream iss(line);
for (int i = 0; i < 2; ++i) iss >> vids[i], vids[i] -= min_vertex_id;
offsets[vids[0]]++;
if (!directed) {
offsets[vids[1]]++;
}
}
file.close();
}
uintE prefix = 0;
for (size_t i = 0; i < vertex_count + 1; ++i) {
row_ptrs[i] = prefix;
prefix += offsets[i];
offsets[i] = row_ptrs[i];
}
{
std::ifstream file(filename.c_str(), std::fstream::in);
std::string line;
uintV vids[2];
while (getline(file, line)) {
if (line.length() == 0 || !std::isdigit(line[0])) continue;
std::istringstream iss(line);
for (int i = 0; i < 2; ++i) iss >> vids[i], vids[i] -= min_vertex_id;
cols[offsets[vids[0]]++] = vids[1];
if (!directed) {
cols[offsets[vids[1]]++] = vids[0];
}
}
file.close();
}
delete[] offsets;
offsets = NULL;
SortCSRArray(vertex_count, row_ptrs, cols);
std::cout << "finish building CSR" << std::endl;
}
// for the input of testing
static void ReadFromVector(std::vector<std::vector<uintV>>& data,
size_t& vertex_count, size_t& edge_count,
uintE*& row_ptrs, uintV*& cols) {
vertex_count = data.size();
row_ptrs = new uintE[vertex_count + 1];
uintE sum = 0;
for (uintV u = 0; u < data.size(); ++u) {
row_ptrs[u] = sum;
sum += data[u].size();
}
row_ptrs[vertex_count] = sum;
edge_count = sum;
cols = new uintV[edge_count];
for (uintV u = 0; u < data.size(); ++u) {
for (uintE off = 0; off < data[u].size(); ++off) {
cols[row_ptrs[u] + off] = data[u][off];
assert(data[u][off] < vertex_count);
}
}
}
static void WriteDataFile(GraphFormat graph_format, std::string& filename,
bool directed, size_t vertex_count,
size_t edge_count, uintE* row_ptrs, uintV* cols) {
switch (graph_format) {
case SNAP:
WriteCSRSNAPFile(filename, directed, vertex_count, edge_count, row_ptrs,
cols);
break;
case BIN:
WriteCSRBinFile(filename, directed, vertex_count, edge_count, row_ptrs,
cols);
break;
case ADJ:
WriteCSRAdjFile(filename, directed, vertex_count, edge_count, row_ptrs,
cols);
break;
default:
assert(false);
}
}
static void WriteCSRBinFile(std::string& filename, bool directed,
size_t vertex_count, size_t edge_count,
uintE* row_ptrs, uintV* cols) {
TimeMeasurer timer;
timer.StartTimer();
std::cout << "start write csr bin file....";
std::string output_filename = filename + ".bin";
FILE* file_out = fopen(output_filename.c_str(), "wb");
assert(file_out != NULL);
size_t res = 0;
size_t uintV_size = sizeof(uintV), uintE_size = sizeof(uintE);
fwrite(&uintV_size, sizeof(size_t), 1, file_out);
fwrite(&uintE_size, sizeof(size_t), 1, file_out);
fwrite(&vertex_count, sizeof(size_t), 1, file_out);
fwrite(&edge_count, sizeof(size_t), 1, file_out);
for (uintV u = 0; u <= vertex_count; ++u) {
res += fwrite(&row_ptrs[u], sizeof(uintE), 1, file_out);
}
for (uintV u = 0; u < vertex_count; ++u) {
for (uintE j = row_ptrs[u]; j < row_ptrs[u + 1]; ++j) {
auto v = cols[j];
res += fwrite(&v, sizeof(uintV), 1, file_out);
}
}
assert(res == ((vertex_count + 1) + edge_count));
fclose(file_out);
timer.EndTimer();
std::cout << "finish write csr bin file, output_filename="
<< output_filename
<< ",elapsed_time=" << timer.GetElapsedMicroSeconds() / 1000.0
<< "ms" << std::endl;
}
static void WriteCSRSNAPFile(std::string& filename, bool directed,
size_t vertex_count, size_t edge_count,
uintE* row_ptrs, uintV* cols) {
TimeMeasurer timer;
timer.StartTimer();
std::cout << "start write csr snap file....";
std::string output_filename = filename + ".snap.txt";
std::ofstream file(output_filename.c_str(), std::fstream::out);
for (uintV u = 0; u < vertex_count; ++u) {
for (uintE j = row_ptrs[u]; j < row_ptrs[u + 1]; ++j) {
auto v = cols[j];
        if (u < v) { // write each undirected edge once; the directed flag is ignored here
file << u << " " << v << std::endl;
}
}
}
file.close();
timer.EndTimer();
std::cout << "finish write csr snap file, output_filename="
<< output_filename
<< ",elapsed_time=" << timer.GetElapsedMicroSeconds() / 1000.0
<< "ms" << std::endl;
}
// for fennel
static void WriteCSRAdjFile(std::string& filename, bool directed,
size_t vertex_count, size_t edge_count,
uintE* row_ptrs, uintV* cols) {
TimeMeasurer timer;
timer.StartTimer();
std::cout << "start write csr adj file....";
std::string output_filename = filename + ".adj.txt";
std::ofstream file(output_filename.c_str(), std::fstream::out);
for (uintV u = 0; u < vertex_count; ++u) {
file << u << "\t";
for (uintE j = row_ptrs[u]; j < row_ptrs[u + 1]; ++j) {
auto v = cols[j];
file << v << " ";
}
file << std::endl;
}
file.close();
std::string info_output_filename = filename + ".adj.txt.info";
std::ofstream info_file(info_output_filename.c_str(), std::fstream::out);
info_file << vertex_count << " " << edge_count;
info_file.close();
timer.EndTimer();
std::cout << "finish write csr adj file, output_filename="
<< output_filename
<< ",elapsed_time=" << timer.GetElapsedMicroSeconds() / 1000.0
<< "ms" << std::endl;
}
static void SortCSRArray(size_t vertex_count, uintE* row_ptrs, uintV* cols) {
#if defined(OPENMP)
#pragma omp parallel for schedule(dynamic)
for (uintV u = 0; u < vertex_count; ++u) {
std::sort(cols + row_ptrs[u], cols + row_ptrs[u + 1]);
}
#else
std::cout << "start sorting..." << std::endl;
for (uintV u = 0; u < vertex_count; ++u) {
std::sort(cols + row_ptrs[u], cols + row_ptrs[u + 1]);
}
#endif
}
static void Validate(size_t vertex_count, uintE* row_ptrs, uintV* cols) {
// ensure edge list of each vertex is sorted
// ensure no self loops
// ensure no parallel edges
size_t dangling = 0;
size_t self_loops = 0;
size_t parallel_edges = 0;
for (uintV u = 0; u < vertex_count; ++u) {
if (row_ptrs[u] == row_ptrs[u + 1]) {
dangling++;
}
for (uintE j = row_ptrs[u]; j < row_ptrs[u + 1]; ++j) {
auto v = cols[j];
if (u == v) self_loops++;
if (j > row_ptrs[u]) {
if (v == cols[j - 1]) parallel_edges++;
}
}
}
if (dangling)
std::cout << "Warning: dangling_nodes=" << dangling << std::endl;
if (self_loops)
std::cout << "Warning: self_loops=" << self_loops << std::endl;
if (parallel_edges)
std::cout << "Warning: parallel_edges=" << parallel_edges << std::endl;
assert(dangling == 0 && self_loops == 0 && parallel_edges == 0);
}
};
#endif
|
bd_cell.c | #include <stdlib.h>
#include <stdio.h>
#include <unistd.h> // access
#include <math.h>
#include <assert.h>
#include "timer.h"
#include "bd.h"
#include <omp.h>
#define NTHREADS 36
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#define my_EPS 0.000000001
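/* decode a flattened cell index (index = i*b*b + j*b + k, k fastest)
   back into 3-D cell coordinates; e.g. b = 4: index 27 -> (i,j,k) = (1,2,3) */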
void get_indices(int index, int *i, int *j, int *k, int b){
int ib, ib2;
ib = index%(b); ib2 = index%(b*b);
*k = ib;
*i = (index-ib2)/(b*b);
*j = (ib2-*k)/b;
return;
}
struct box
{
int head;
};
// it is possible to use smaller boxes and more complex neighbor patterns
#define NUM_BOX_NEIGHBORS 14
int box_neighbors[NUM_BOX_NEIGHBORS][3] =
{
{-1,-1,-1},
{-1,-1, 0},
{-1,-1,+1},
{-1, 0,-1},
{-1, 0, 0},
{-1, 0,+1},
{-1,+1,-1},
{-1,+1, 0},
{-1,+1,+1},
{ 0,-1,-1},
{ 0,-1, 0},
{ 0,-1,+1},
{ 0, 0,-1},
{ 0, 0, 0} // will calculate within the box interactions
};
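/* Half-shell pattern: 13 of the 26 surrounding cells plus the cell itself.
   The omitted half is covered implicitly because every pair interaction
   below updates the forces on both particles (Newton's third law).
   Note: within the {0,0,0} self box the double loop below visits each
   unordered pair twice, so intra-cell pair forces are accumulated twice;
   starting the inner loop at next[p1] would count each pair once. */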
int bd(int npos, double * restrict pos_orig, double * restrict buf, const int *types, double L, double * restrict pos, int* restrict next, double* restrict forces, double f_const)
{
  // Initialisations required for the interaction function.
  // NOTE: these could be passed as inputs to bd() itself.
  double krepul = 100, a = 1, a_sq, phi = 0.2, f;
  a_sq = a*a;
  int boxdim; // number of cells along each side of the periodic box of length L
  double cutoff2; int numpairs_p;
  cutoff2 = 4; // squared interaction cutoff (cutoff = 2*a); requires cutoff < L/boxdim
  boxdim = (int)(L/cutoff2)*a; //(int)(L/cutoff2*0.8);
printf("L = %lf cutoff2 = %lf boxdim = %d\n", L, cutoff2, boxdim);
struct box b[boxdim][boxdim][boxdim];
struct box *bp;
struct box *neigh_bp;
// box indices
int idx, idy, idz, index, box2, ib2;
int neigh_idx, neigh_idy, neigh_idz;
// allocate implied linked list
int p1, p2, j, i;
double d2, dx, dy, dz, s;
box2 = boxdim*boxdim;
//*****************************************END initialisations***********************************
if (boxdim < 4 || cutoff2 > (L/boxdim)*(L/boxdim))
{
printf("interactions: bad input parameters\n");
// return 1;
}
double t0, t_init_cells = 0, t_assign_to_cells=0, t_update_pos=0, t_force=0;
for (int step=0; step<INTERVAL_LEN; step++)
{
// Calculation of interaction per time step
t0 = time_in_seconds();
// allocate memory for particles in each box
// #pragma omp parallel for schedule(static) private(idx, idy, idz, ib2) shared(b, boxdim, box2)
// for (index=0; index<boxdim*box2; index++){
// idz = index%(boxdim);
// ib2 = index%(box2);
// idx = (index-ib2)/(box2);
// idy = (ib2-idz)/boxdim;
// b[idx][idy][idz].head=-1;
// }
for (idx=0; idx<boxdim; idx++){
for (idy=0; idy<boxdim; idy++){
for (idz=0; idz<boxdim; idz++){
b[idx][idy][idz].head=-1;
}
}
}
t_init_cells += time_in_seconds()-t0;
t0 = time_in_seconds();
// traverse all particles and assign to boxes
// #pragma omp parallel for schedule(static) private(i, idx, idy, idz, bp) shared(b, next) num_threads(NTHREADS)
for (i=0; i<npos; i++)
{
      // wrap each coordinate back into the primary box [0, L) (periodic
      // boundaries); since particles move slowly, adding/subtracting L
      // once would also suffice
      for (int d = 0; d < 3; d++) {
        if (pos_orig[3*i+d] >= 0) {
          pos[3*i+d] = fmod(pos_orig[3*i+d], L);
        } else { // pos_orig is negative
          pos[3*i+d] = L - fmod(-pos_orig[3*i+d], L);
        }
      }
if (pos[3*i]<0){printf("pos_orig = %lf pos defect = %lf and i = %d and L =%lf\n", pos_orig[3*i], pos[3*i], i, L);}
// initialize entry of implied linked list
next[i] = -1;
forces[3*i+0] = 0; forces[3*i+1] = 0; forces[3*i+2] = 0; // re-initialising interaction forces at each time step
// which box does the particle belong to?
// assumes particles have positions within [0,L]^3
idx = (int)(pos[3*i ]/L*boxdim);
idy = (int)(pos[3*i+1]/L*boxdim);
idz = (int)(pos[3*i+2]/L*boxdim);
// add to beginning of implied linked list
bp = &b[idx][idy][idz];
// next[i] = bp->head; // next = previous (my notation)
// #pragma omp critical
// {
next[i] = bp->head; // next = previous (my notation)
bp->head = i; // head = latest (my notation)
// }
}
t_assign_to_cells += time_in_seconds()-t0;
t0 = time_in_seconds();
#pragma omp parallel for schedule(static) private(j, neigh_idx, neigh_idy, neigh_idz, neigh_bp, p1, p2, dx, dy, dz, d2, s, f, idx, idy, idz, ib2, bp) shared(b, box_neighbors, boxdim, L, pos, forces, krepul, a, a_sq, next, box2) num_threads(NTHREADS)
for (index=0; index<boxdim*box2; index++){
idz = index%(boxdim);
ib2 = index%(box2);
idx = (index-ib2)/(box2);
idy = (ib2-idz)/boxdim;
bp = &b[idx][idy][idz];
      // interactions within this box and with its half-shell neighbor boxes
      // (the nested parallel for below stays inactive unless nested
      // parallelism is enabled)
#pragma omp parallel for schedule(static) private(j, neigh_idx, neigh_idy, neigh_idz, neigh_bp, p1, p2, dx, dy, dz, d2, s, f) shared(bp, b, box_neighbors, boxdim, L, pos, forces, krepul, a, a_sq, next, idx, idy, idz)// num_threads(NTHREADS)
for (j=0; j<NUM_BOX_NEIGHBORS; j++)
{
neigh_idx = (idx + box_neighbors[j][0] + boxdim) % boxdim;
neigh_idy = (idy + box_neighbors[j][1] + boxdim) % boxdim;
neigh_idz = (idz + box_neighbors[j][2] + boxdim) % boxdim;
neigh_bp = &b[neigh_idx][neigh_idy][neigh_idz];
// when using boxes, the minimum image computation is
// known beforehand, thus we can compute position offsets
// to compensate for wraparound when computing distances
double xoffset = 0.;
double yoffset = 0.;
double zoffset = 0.;
if (idx + box_neighbors[j][0] == -1) xoffset = -L;
if (idy + box_neighbors[j][1] == -1) yoffset = -L;
if (idz + box_neighbors[j][2] == -1) zoffset = -L;
if (idx + box_neighbors[j][0] == boxdim) xoffset = L;
if (idy + box_neighbors[j][1] == boxdim) yoffset = L;
if (idz + box_neighbors[j][2] == boxdim) zoffset = L;
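        // e.g. when idx == 0 its -x neighbor wraps to boxdim-1; shifting that
        // box's particles by xoffset = -L makes the raw coordinate differences
        // below equal the minimum-image distances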
// NOTE: modifying the function to update the forces
p1 = neigh_bp->head;
while (p1 != -1)
{
p2 = bp->head;
while (p2 != -1)
{
// compute distance vector
dx = pos[3*p1+0] - pos[3*p2+0] + xoffset;
dy = pos[3*p1+1] - pos[3*p2+1] + yoffset;
dz = pos[3*p1+2] - pos[3*p2+2] + zoffset;
            d2 = dx*dx+dy*dy+dz*dz+my_EPS; // my_EPS keeps s > 0 for the p1 == p2 self pair
if ( d2<4.0*a_sq)
{
s = sqrt(d2);
f = krepul*(2*a-s);
#pragma omp atomic
forces[3*p1+0] += f*dx/s;
#pragma omp atomic
forces[3*p1+1] += f*dy/s;
#pragma omp atomic
forces[3*p1+2] += f*dz/s;
#pragma omp atomic
forces[3*p2+0] -= f*dx/s;
#pragma omp atomic
forces[3*p2+1] -= f*dy/s;
#pragma omp atomic
forces[3*p2+2] -= f*dz/s;
}
p2 = next[p2];
}
p1 = next[p1];
}
}
}
t_force += time_in_seconds() - t0;
t0 = time_in_seconds();
// generate random values from standard normal distribution
// note: this MKL function is sequential but vectorized
vdRngGaussian(VSL_RNG_METHOD_GAUSSIAN_BOXMULLER, stream, 3*npos, buf, 0., 1.);
// update positions with Brownian displacements
#pragma omp parallel for schedule(static) shared(pos_orig) private(i) num_threads(NTHREADS)
for (int i=0; i<3*npos; i++)
{
pos_orig[i] += forces[i]*DELTAT+f_const*buf[i];
}
t_update_pos += time_in_seconds() - t0;
}
printf("--------------------------------------------------------\n");
printf("Time: %f for initiating the cell head \n", t_init_cells);
printf("Time: %f for assigning particles to cells \n", t_assign_to_cells);
printf("Time: %f for force calculations \n", t_force);
printf("Time: %f for pos update \n", t_update_pos);
printf("--------------------------------------------------------\n");
return 0;
}
|
omp_parallel_sections_reduction.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"
int test_omp_parallel_sections_reduction()
{
int sum;
int known_sum;
double dpt;
double dsum;
double dknown_sum;
  double dt=0.5; /* base of geometric series for + and - test */
double rounding_error= 1.E-5;
int diff;
double ddiff;
int product;
int known_product;
int logic_and;
int bit_and;
int logic_or;
int bit_or;
int exclusiv_bit_or;
int logics[1000];
int i;
int result;
sum = 7;
dsum=0;
product =1;
dpt = 1;
logic_and=1;
bit_and=1;
logic_or=0;
bit_or=0;
exclusiv_bit_or=0;
result =0;
/* int my_islarger;*/
/*int is_larger=1;*/
// Test summation of integers
  known_sum = (999*1000)/2+7; /* Gauss sum of 1..999 plus the initial value 7 */
#pragma omp parallel sections private(i) reduction(+:sum)
{
#pragma omp section
{
for (i=1;i<300;i++) {
sum=sum+i;
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
sum=sum+i;
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
sum=sum+i;
}
}
}
if(known_sum!=sum) {
result++;
fprintf(stderr,"Error in sum with integers: Result was %d"
" instead of %d.\n",sum, known_sum);
}
// Test differences of integers
diff = (999*1000)/2;
#pragma omp parallel sections private(i) reduction(-:diff)
{
#pragma omp section
{
for (i=1;i<300;i++) {
diff=diff-i;
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
diff=diff-i;
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
diff=diff-i;
}
}
}
if(diff != 0) {
result++;
fprintf(stderr,"Error in Difference with integers: Result was %d"
" instead of 0.\n",diff);
}
// Test summation of doubles
for (i=0;i<20;++i) {
dpt*=dt;
}
dknown_sum = (1-dpt)/(1-dt);
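  /* closed form of the geometric series: dpt = dt^20 after the loop above,
     so dknown_sum = (1 - dt^20)/(1 - dt) = sum_{i=0}^{19} dt^i */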
#pragma omp parallel sections private(i) reduction(+:dsum)
{
#pragma omp section
{
for (i=0;i<6;++i) {
dsum += pow(dt,i);
}
}
#pragma omp section
{
for (i=6;i<12;++i) {
dsum += pow(dt,i);
}
}
#pragma omp section
{
for (i=12;i<20;++i) {
dsum += pow(dt,i);
}
}
}
if( fabs(dsum-dknown_sum) > rounding_error ) {
result++;
fprintf(stderr,"Error in sum with doubles: Result was %f"
" instead of %f (Difference: %E)\n",
dsum, dknown_sum, dsum-dknown_sum);
}
// Test differences of doubles
dpt=1;
for (i=0;i<20;++i) {
dpt*=dt;
}
fprintf(stderr,"\n");
ddiff = (1-dpt)/(1-dt);
#pragma omp parallel sections private(i) reduction(-:ddiff)
{
#pragma omp section
{
for (i=0;i<6;++i) {
ddiff -= pow(dt,i);
}
}
#pragma omp section
{
for (i=6;i<12;++i) {
ddiff -= pow(dt,i);
}
}
#pragma omp section
{
for (i=12;i<20;++i) {
ddiff -= pow(dt,i);
}
}
}
if( fabs(ddiff) > rounding_error) {
result++;
fprintf(stderr,"Error in Difference with doubles: Result was %E"
" instead of 0.0\n",ddiff);
}
// Test product of integers
  known_product = 3628800; /* 10! */
#pragma omp parallel sections private(i) reduction(*:product)
{
#pragma omp section
{
for(i=1;i<3;i++) {
product *= i;
}
}
#pragma omp section
{
for(i=3;i<7;i++) {
product *= i;
}
}
#pragma omp section
{
for(i=7;i<11;i++) {
product *= i;
}
}
}
if(known_product != product) {
result++;
fprintf(stderr,"Error in Product with integers: Result was %d"
" instead of %d\n",product,known_product);
}
// Test logical AND
for(i=0;i<1000;i++) {
logics[i]=1;
}
#pragma omp parallel sections private(i) reduction(&&:logic_and)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_and = (logic_and && logics[i]);
}
}
}
if(!logic_and) {
result++;
fprintf(stderr,"Error in logic AND part 1\n");
}
logic_and = 1;
logics[501] = 0;
#pragma omp parallel sections private(i) reduction(&&:logic_and)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_and = (logic_and && logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_and = (logic_and && logics[i]);
}
}
}
if(logic_and) {
result++;
fprintf(stderr,"Error in logic AND part 2");
}
// Test logical OR
for(i=0;i<1000;i++) {
logics[i]=0;
}
#pragma omp parallel sections private(i) reduction(||:logic_or)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_or = (logic_or || logics[i]);
}
}
}
if(logic_or) {
result++;
fprintf(stderr,"Error in logic OR part 1\n");
}
logic_or = 0;
logics[501]=1;
#pragma omp parallel sections private(i) reduction(||:logic_or)
{
#pragma omp section
{
for (i=1;i<300;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=300;i<700;i++) {
logic_or = (logic_or || logics[i]);
}
}
#pragma omp section
{
for (i=700;i<1000;i++) {
logic_or = (logic_or || logics[i]);
}
}
}
if(!logic_or) {
result++;
fprintf(stderr,"Error in logic OR part 2\n");
}
// Test bitwise AND
for(i=0;i<1000;++i) {
logics[i]=1;
}
#pragma omp parallel sections private(i) reduction(&:bit_and)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_and = (bit_and & logics[i]);
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_and = (bit_and & logics[i]);
}
}
}
if(!bit_and) {
result++;
fprintf(stderr,"Error in BIT AND part 1\n");
}
bit_and = 1;
logics[501]=0;
#pragma omp parallel sections private(i) reduction(&:bit_and)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_and = bit_and & logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_and = bit_and & logics[i];
}
}
}
if(bit_and) {
result++;
fprintf(stderr,"Error in BIT AND part 2");
}
// Test bitwise OR
for(i=0;i<1000;i++) {
logics[i]=0;
}
#pragma omp parallel sections private(i) reduction(|:bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_or = bit_or | logics[i];
}
}
}
if(bit_or) {
result++;
fprintf(stderr,"Error in BIT OR part 1\n");
}
bit_or = 0;
logics[501]=1;
#pragma omp parallel sections private(i) reduction(|:bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
bit_or = bit_or | logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
bit_or = bit_or | logics[i];
}
}
}
if(!bit_or) {
result++;
fprintf(stderr,"Error in BIT OR part 2\n");
}
// Test bitwise XOR
for(i=0;i<1000;i++) {
logics[i]=0;
}
#pragma omp parallel sections private(i) reduction(^:exclusiv_bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
}
if(exclusiv_bit_or) {
result++;
fprintf(stderr,"Error in EXCLUSIV BIT OR part 1\n");
}
exclusiv_bit_or = 0;
logics[501]=1;
#pragma omp parallel sections private(i) reduction(^:exclusiv_bit_or)
{
#pragma omp section
{
for(i=0;i<300;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=300;i<700;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
#pragma omp section
{
for(i=700;i<1000;++i) {
exclusiv_bit_or = exclusiv_bit_or ^ logics[i];
}
}
}
if(!exclusiv_bit_or) {
result++;
fprintf(stderr,"Error in EXCLUSIV BIT OR part 2\n");
}
/*printf("\nResult:%d\n",result);*/
return (result==0);
}
int main()
{
int i;
int num_failed=0;
for(i = 0; i < REPETITIONS; i++) {
if(!test_omp_parallel_sections_reduction()) {
num_failed++;
}
}
return num_failed;
}
|
backprop.c | /*
******************************************************************
* HISTORY
* 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University
* Prepared for 15-681, Fall 1994.
* Modified by Shuai Che
******************************************************************
*/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "backprop.h"
#include <math.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#define ABS(x) (((x) > 0.0) ? (x) : (-(x)))
#define fastcopy(to,from,len)\
{\
register char *_to,*_from;\
register int _i,_l;\
_to = (char *)(to);\
_from = (char *)(from);\
_l = (len);\
for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\
}
/*** Return random number between 0.0 and 1.0 ***/
float drnd()
{
return ((float) rand() / (float) BIGRND);
}
/*** Return random number between -1.0 and 1.0 ***/
float dpn1()
{
return ((drnd() * 2.0) - 1.0);
}
/*** The squashing function. Currently, it's a sigmoid. ***/
float squash(float x)
{
float m;
//x = -x;
//m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120;
//return(1.0 / (1.0 + m));
return (1.0 / (1.0 + exp(-x)));
}
/*** Allocate 1d array of floats ***/
float *alloc_1d_dbl(int n)
{
float *new_t;
new_t = (float *) malloc ((unsigned) (n * sizeof (float)));
if (new_t == NULL) {
printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n");
return (NULL);
}
return (new_t);
}
/*** Allocate 2d array of floats ***/
float **alloc_2d_dbl(int m, int n)
{
int i;
float **new_t;
new_t = (float **) malloc ((unsigned) (m * sizeof (float *)));
if (new_t == NULL) {
printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n");
return (NULL);
}
for (i = 0; i < m; i++) {
new_t[i] = alloc_1d_dbl(n);
}
return (new_t);
}
void bpnn_randomize_weights(float **w, int m, int n)
{
int i, j;
for (i = 0; i <= m; i++) {
for (j = 0; j <= n; j++) {
w[i][j] = (float) rand()/RAND_MAX;
// w[i][j] = dpn1();
}
}
}
void bpnn_randomize_row(float *w, int m)
{
int i;
for (i = 0; i <= m; i++) {
//w[i] = (float) rand()/RAND_MAX;
w[i] = 0.1;
}
}
void bpnn_zero_weights(float **w, int m, int n)
{
int i, j;
for (i = 0; i <= m; i++) {
for (j = 0; j <= n; j++) {
w[i][j] = 0.0;
}
}
}
void bpnn_initialize(int seed)
{
srand(seed);
}
BPNN *bpnn_internal_create(int n_in, int n_hidden, int n_out)
{
BPNN *newnet;
newnet = (BPNN *) malloc (sizeof (BPNN));
if (newnet == NULL) {
printf("BPNN_CREATE: Couldn't allocate neural network\n");
return (NULL);
}
newnet->input_n = n_in;
newnet->hidden_n = n_hidden;
newnet->output_n = n_out;
newnet->input_units = alloc_1d_dbl(n_in + 1);
newnet->hidden_units = alloc_1d_dbl(n_hidden + 1);
newnet->output_units = alloc_1d_dbl(n_out + 1);
newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1);
newnet->output_delta = alloc_1d_dbl(n_out + 1);
newnet->target = alloc_1d_dbl(n_out + 1);
newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1);
newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1);
return (newnet);
}
void bpnn_free(BPNN *net)
{
int n1, n2, i;
n1 = net->input_n;
n2 = net->hidden_n;
free((char *) net->input_units);
free((char *) net->hidden_units);
free((char *) net->output_units);
free((char *) net->hidden_delta);
free((char *) net->output_delta);
free((char *) net->target);
for (i = 0; i <= n1; i++) {
free((char *) net->input_weights[i]);
free((char *) net->input_prev_weights[i]);
}
free((char *) net->input_weights);
free((char *) net->input_prev_weights);
for (i = 0; i <= n2; i++) {
free((char *) net->hidden_weights[i]);
free((char *) net->hidden_prev_weights[i]);
}
free((char *) net->hidden_weights);
free((char *) net->hidden_prev_weights);
free((char *) net);
}
/*** Creates a new fully-connected network from scratch,
with the given numbers of input, hidden, and output units.
Threshold units are automatically included. All weights are
randomly initialized.
Space is also allocated for temporary storage (momentum weights,
error computations, etc).
***/
BPNN *bpnn_create(int n_in, int n_hidden, int n_out)
{
BPNN *newnet;
newnet = bpnn_internal_create(n_in, n_hidden, n_out);
#ifdef INITZERO
bpnn_zero_weights(newnet->input_weights, n_in, n_hidden);
#else
bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden);
#endif
bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out);
bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden);
bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out);
bpnn_randomize_row(newnet->target, n_out);
return (newnet);
}
void bpnn_layerforward(float *l1, float *l2, float **conn, int n1, int n2)
{
float sum;
int j, k;
/*** Set up thresholding unit ***/
l1[0] = 1.0;
#ifdef OPEN
omp_set_num_threads(NUM_THREAD);
#pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static) // reduction only privatizes sum per thread; it is re-zeroed for every j
#endif
/*** For each unit in second layer ***/
for (j = 1; j <= n2; j++) {
/*** Compute weighted sum of its inputs ***/
sum = 0.0;
for (k = 0; k <= n1; k++) {
sum += conn[k][j] * l1[k];
}
l2[j] = squash(sum);
}
}
void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err)
{
int j;
float o, t, errsum;
errsum = 0.0;
for (j = 1; j <= nj; j++) {
o = output[j];
t = target[j];
delta[j] = o * (1.0 - o) * (t - o);
errsum += ABS(delta[j]);
}
*err = errsum;
}
void bpnn_hidden_error(float *delta_h,
int nh,
float *delta_o,
int no,
float **who,
float *hidden,
float *err)
{
int j, k;
float h, sum, errsum;
errsum = 0.0;
for (j = 1; j <= nh; j++) {
h = hidden[j];
sum = 0.0;
for (k = 1; k <= no; k++) {
sum += delta_o[k] * who[j][k];
}
delta_h[j] = h * (1.0 - h) * sum;
errsum += ABS(delta_h[j]);
}
*err = errsum;
}
void bpnn_adjust_weights(float *delta, int ndelta, float *ly, int nly, float **w, float **oldw)
{
float new_dw;
int k, j;
ly[0] = 1.0;
#ifdef OPEN
omp_set_num_threads(NUM_THREAD);
#pragma omp parallel for \
shared(oldw, w, delta) \
private(j, k, new_dw) \
  firstprivate(ndelta, nly) // 'momentum' dropped: MOMENTUM is a macro, not a variable in scope
#endif
for (j = 1; j <= ndelta; j++) {
for (k = 0; k <= nly; k++) {
new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k][j]));
w[k][j] += new_dw;
oldw[k][j] = new_dw;
}
}
}
void bpnn_feedforward(BPNN *net)
{
int in, hid, out;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
/*** Feed forward input activations. ***/
bpnn_layerforward(net->input_units, net->hidden_units,
net->input_weights, in, hid);
bpnn_layerforward(net->hidden_units, net->output_units,
net->hidden_weights, hid, out);
}
void bpnn_train(BPNN *net, float *eo, float *eh)
{
int in, hid, out;
float out_err, hid_err;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
/*** Feed forward input activations. ***/
bpnn_layerforward(net->input_units, net->hidden_units,
net->input_weights, in, hid);
bpnn_layerforward(net->hidden_units, net->output_units,
net->hidden_weights, hid, out);
/*** Compute error on output and hidden units. ***/
bpnn_output_error(net->output_delta, net->target, net->output_units,
out, &out_err);
bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out,
net->hidden_weights, net->hidden_units, &hid_err);
*eo = out_err;
*eh = hid_err;
/*** Adjust input and hidden weights. ***/
bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid,
net->hidden_weights, net->hidden_prev_weights);
bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in,
net->input_weights, net->input_prev_weights);
}
void bpnn_save(BPNN *net, char *filename)
{
int n1, n2, n3, i, j, memcnt;
float dvalue, **w;
char *mem;
///add//
FILE *pFile;
pFile = fopen( filename, "w+" );
///////
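  /* file layout (matches bpnn_read below): three ints n1, n2, n3, then the
     (n1+1) x (n2+1) input-weight matrix and the (n2+1) x (n3+1) hidden-weight
     matrix, both as raw floats in row-major order */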
/*
if ((fd = creat(filename, 0644)) == -1) {
printf("BPNN_SAVE: Cannot create '%s'\n", filename);
return;
}
*/
n1 = net->input_n; n2 = net->hidden_n; n3 = net->output_n;
printf("Saving %dx%dx%d network to '%s'\n", n1, n2, n3, filename);
//fflush(stdout);
//write(fd, (char *) &n1, sizeof(int));
//write(fd, (char *) &n2, sizeof(int));
//write(fd, (char *) &n3, sizeof(int));
  fwrite(&n1, sizeof(int), 1, pFile); // was sizeof(char): wrote a single byte per
  fwrite(&n2, sizeof(int), 1, pFile); // header field, which bpnn_read (reading
  fwrite(&n3, sizeof(int), 1, pFile); // sizeof(int) per field) could not parse
memcnt = 0;
w = net->input_weights;
mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float)));
for (i = 0; i <= n1; i++) {
for (j = 0; j <= n2; j++) {
dvalue = w[i][j];
fastcopy(&mem[memcnt], &dvalue, sizeof(float));
memcnt += sizeof(float);
}
}
//write(fd, mem, (n1+1) * (n2+1) * sizeof(float));
  fwrite(mem, sizeof(float), (n1+1) * (n2+1), pFile); // count is in elements; the old byte count overran mem 4x
free(mem);
memcnt = 0;
w = net->hidden_weights;
mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float)));
for (i = 0; i <= n2; i++) {
for (j = 0; j <= n3; j++) {
dvalue = w[i][j];
fastcopy(&mem[memcnt], &dvalue, sizeof(float));
memcnt += sizeof(float);
}
}
//write(fd, mem, (n2+1) * (n3+1) * sizeof(float));
fwrite(mem, sizeof(float), (n2+1) * (n3+1), pFile); //count is in floats, not bytes
free(mem);
fclose(pFile);
return;
}
BPNN *bpnn_read(char *filename)
{
char *mem;
BPNN *new_t;
int fd, n1, n2, n3, i, j, memcnt;
if ((fd = open(filename, 0, 0644)) == -1) {
return (NULL);
}
printf("Reading '%s'\n", filename); //fflush(stdout);
read(fd, (char *) &n1, sizeof(int));
read(fd, (char *) &n2, sizeof(int));
read(fd, (char *) &n3, sizeof(int));
new_t = bpnn_internal_create(n1, n2, n3);
printf("'%s' contains a %dx%dx%d network\n", filename, n1, n2, n3);
printf("Reading input weights..."); //fflush(stdout);
memcnt = 0;
mem = (char *) malloc ((unsigned) ((n1+1) * (n2+1) * sizeof(float)));
read(fd, mem, (n1+1) * (n2+1) * sizeof(float));
for (i = 0; i <= n1; i++) {
for (j = 0; j <= n2; j++) {
fastcopy(&(new_t->input_weights[i][j]), &mem[memcnt], sizeof(float));
memcnt += sizeof(float);
}
}
free(mem);
printf("Done\nReading hidden weights..."); //fflush(stdout);
memcnt = 0;
mem = (char *) malloc ((unsigned) ((n2+1) * (n3+1) * sizeof(float)));
read(fd, mem, (n2+1) * (n3+1) * sizeof(float));
for (i = 0; i <= n2; i++) {
for (j = 0; j <= n3; j++) {
fastcopy(&(new_t->hidden_weights[i][j]), &mem[memcnt], sizeof(float));
memcnt += sizeof(float);
}
}
free(mem);
close(fd);
printf("Done\n"); //fflush(stdout);
bpnn_zero_weights(new_t->input_prev_weights, n1, n2);
bpnn_zero_weights(new_t->hidden_prev_weights, n2, n3);
return (new_t);
}
|
omp51_task_dep_inoutset.c | // RUN: %libomp-compile-and-run
// RUN: %libomp-cxx-compile-and-run
// UNSUPPORTED: gcc
// Tests OMP 5.0 task dependences "mutexinoutset" and 5.1 "inoutset",
// emulates compiler codegen for new dep kinds
// Mutually exclusive tasks get same input dependency info array
//
// Task tree created:
//      task0 - task1 (in)
//             \
//        task2 - task3 (inoutset)
//             /
//      task4 - task5 (in)
//             /
//  task6 <--> task7 (mutexinoutset)
//       \    /
//       task8 (in)
//
#include <stdio.h>
#include <omp.h>
#ifdef _WIN32
#include <windows.h>
#define mysleep(n) Sleep(n)
#else
#include <unistd.h>
#define mysleep(n) usleep((n)*1000)
#endif
// to check the # of concurrent tasks (must be 1 for MTX, <3 for other kinds)
static int volatile checker = 0;
static int err = 0;
#ifndef DELAY
#define DELAY 100
#endif
// ---------------------------------------------------------------------------
// internal data to emulate compiler codegen
typedef struct DEP {
size_t addr;
size_t len;
unsigned char flags;
} dep;
typedef struct task {
void** shareds;
void* entry;
int part_id;
void* destr_thunk;
int priority;
long long device_id;
int f_priv;
} task_t;
#define TIED 1
typedef int(*entry_t)(int, task_t*);
typedef struct ID {
int reserved_1;
int flags;
int reserved_2;
int reserved_3;
char *psource;
} id;
// thunk routine for tasks with MTX dependency
int thunk_m(int gtid, task_t* ptask) {
int th = omp_get_thread_num();
#pragma omp atomic
++checker;
printf("task _%d, th %d\n", ptask->f_priv, th);
if (checker != 1) { // no more than 1 task at a time
err++;
printf("Error1, checker %d != 1\n", checker);
}
mysleep(DELAY);
if (checker != 1) { // no more than 1 task at a time
err++;
printf("Error2, checker %d != 1\n", checker);
}
#pragma omp atomic
--checker;
return 0;
}
// thunk routine for tasks with inoutset dependency
int thunk_s(int gtid, task_t* ptask) {
int th = omp_get_thread_num();
#pragma omp atomic
++checker;
printf("task _%d, th %d\n", ptask->f_priv, th);
if (checker > 2) { // no more than 2 tasks concurrently
err++;
printf("Error1, checker %d > 2\n", checker);
}
mysleep(DELAY);
if (checker > 2) { // no more than 2 tasks concurrently
err++;
printf("Error2, checker %d > 2\n", checker);
}
#pragma omp atomic
--checker;
return 0;
}
#ifdef __cplusplus
extern "C" {
#endif
int __kmpc_global_thread_num(id*);
extern task_t* __kmpc_omp_task_alloc(id *loc, int gtid, int flags,
size_t sz, size_t shar, entry_t rtn);
int
__kmpc_omp_task_with_deps(id *loc, int gtid, task_t *task, int nd, dep *dep_lst,
int nd_noalias, dep *noalias_dep_lst);
static id loc = {0, 2, 0, 0, ";file;func;0;0;;"};
#ifdef __cplusplus
} // extern "C"
#endif
// End of internal data
// ---------------------------------------------------------------------------
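// Note on the flags byte in struct DEP (our reading of libomp's kmp_depend_info
// bitfields, stated as an assumption rather than a guarantee):
//   1 = in, 2 = out, 3 = inout, 4 = mutexinoutset, 8 = inoutset
// which is why the codegen blocks below set sdep[].flags to 1, 4 and 8.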
int main()
{
int i1,i2,i3;
omp_set_num_threads(4);
omp_set_dynamic(0);
#pragma omp parallel
{
#pragma omp single nowait
{
dep sdep[2];
task_t *ptr;
int gtid = __kmpc_global_thread_num(&loc);
int t = omp_get_thread_num();
#pragma omp task depend(in: i1, i2)
{ int th = omp_get_thread_num();
printf("task 0_%d, th %d\n", t, th);
#pragma omp atomic
++checker;
if (checker > 2) { // no more than 2 tasks concurrently
err++;
printf("Error1, checker %d > 2\n", checker);
}
mysleep(DELAY);
if (checker > 2) { // no more than 2 tasks concurrently
err++;
printf("Error1, checker %d > 2\n", checker);
}
#pragma omp atomic
--checker;
}
#pragma omp task depend(in: i1, i2)
{ int th = omp_get_thread_num();
printf("task 1_%d, th %d\n", t, th);
#pragma omp atomic
++checker;
if (checker > 2) { // no more than 2 tasks concurrently
err++;
printf("Error1, checker %d > 2\n", checker);
}
mysleep(DELAY);
if (checker > 2) { // no more than 2 tasks concurrently
err++;
printf("Error1, checker %d > 2\n", checker);
}
#pragma omp atomic
--checker;
}
// compiler codegen start
// task2
ptr = __kmpc_omp_task_alloc(&loc, gtid, TIED, sizeof(task_t), 0, thunk_s);
sdep[0].addr = (size_t)&i1;
sdep[0].len = 0; // not used
sdep[0].flags = 1; // IN
sdep[1].addr = (size_t)&i2;
sdep[1].len = 0; // not used
sdep[1].flags = 8; // INOUTSET
ptr->f_priv = t + 10; // init single first-private variable
__kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);
// task3
ptr = __kmpc_omp_task_alloc(&loc, gtid, TIED, sizeof(task_t), 0, thunk_s);
ptr->f_priv = t + 20; // init single first-private variable
__kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);
// compiler codegen end
t = omp_get_thread_num();
#pragma omp task depend(in: i1, i2)
{ int th = omp_get_thread_num();
printf("task 4_%d, th %d\n", t, th);
#pragma omp atomic
++checker;
if (checker > 2) { // no more than 2 tasks concurrently
err++;
printf("Error1, checker %d > 2\n", checker);
}
mysleep(DELAY);
if (checker > 2) { // no more than 2 tasks concurrently
err++;
printf("Error1, checker %d > 2\n", checker);
}
#pragma omp atomic
--checker;
}
#pragma omp task depend(in: i1, i2)
{ int th = omp_get_thread_num();
printf("task 5_%d, th %d\n", t, th);
#pragma omp atomic
++checker;
if (checker > 2) { // no more than 2 tasks concurrently
err++;
printf("Error1, checker %d > 2\n", checker);
}
mysleep(DELAY);
if (checker > 2) { // no more than 2 tasks concurrently
err++;
printf("Error1, checker %d > 2\n", checker);
}
#pragma omp atomic
--checker;
}
// compiler codegen start
// task6
ptr = __kmpc_omp_task_alloc(&loc, gtid, TIED, sizeof(task_t), 0, thunk_m);
sdep[0].addr = (size_t)&i1;
sdep[0].len = 0; // not used
sdep[0].flags = 4; // MUTEXINOUTSET
sdep[1].addr = (size_t)&i3;
sdep[1].len = 0; // not used
sdep[1].flags = 4; // MUTEXINOUTSET
ptr->f_priv = t + 30; // init single first-private variable
__kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);
// task7
ptr = __kmpc_omp_task_alloc(&loc, gtid, TIED, sizeof(task_t), 0, thunk_m);
ptr->f_priv = t + 40; // init single first-private variable
__kmpc_omp_task_with_deps(&loc, gtid, ptr, 2, sdep, 0, 0);
// compiler codegen end
#pragma omp task depend(in: i3)
{ int th = omp_get_thread_num();
printf("task 8_%d, th %d\n", t, th);
#pragma omp atomic
++checker;
if (checker != 1) { // last task should run exclusively
err++;
printf("Error1, checker %d != 1\n", checker); }
mysleep(DELAY);
if (checker != 1) { // last task should run exclusively
err++;
printf("Error1, checker %d != 1\n", checker); }
#pragma omp atomic
--checker;
}
} // single
} // parallel
if (err == 0) {
printf("passed\n");
return 0;
} else {
printf("failed\n");
return 1;
}
}
|
coreFLT.c | #ifdef DT32
#define flt float
#define DT_CALC DT_FLOAT32
#define epsilon FLT_EPSILON
#else
#define flt double
#define DT_CALC DT_FLOAT64
#define epsilon DBL_EPSILON
#endif
//#include <float.h>
#include "core.h"
#ifdef USING_TIMERS
#include <time.h>
#endif
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <math.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef USING_WASM
#undef SIMD
#define staticx
#include <string.h>
#include <stdbool.h>
#include <nifti2_wasm.h>
#include <emscripten.h>
#include <ctype.h> //for isspace()
#else
#define SIMD
#define xmemcpy memcpy
#define staticx static
#include <nifti2_io.h>
#define bandpass
#define bwlabelx
#define tensor_decomp //tensor_decomp support is optional
#endif
#ifdef SIMD //explicitly vectorize (SSE,AVX,Neon)
#ifdef __x86_64__
#ifdef DT32
#define kSSE32 4 //128-bit SSE handles 4 32-bit floats per instruction
#else
#define kSSE64 2 //128-bit SSE handles 2 64-bit floats per instruction
#endif
#else
#ifdef DT32
#include "sse2neon.h"
#define kSSE32 4 //128-bit SSE handles 4 32-bit floats per instruction
#else
#undef SIMD
#endif
#endif
#endif
#ifndef USING_WASM
#ifdef __x86_64__
#include <immintrin.h>
#else
#include "arm_malloc.h"
#endif
#endif
#ifdef bandpass
#include "bw.h"
#endif
#ifdef bwlabelx
#include "bwlabel.h"
#endif
#ifdef tensor_decomp
#include "tensor.h"
#endif
//#define TFCE //formerly we used Christian Gaser's tfce, new bespoke code handles connectivity
//#ifdef TFCE //we now use in-built tfce function
// #include "tfce_pthread.h"
//#endif
#ifdef SIMD
#ifdef DT32
staticx void nifti_sqrt(flt *v, size_t n) {
flt *vin = v;
//#pragma omp parallel for
for (int64_t i = 0; i <= ((int64_t)n - kSSE32); i += kSSE32) { //signed arithmetic: (n - kSSE32) would wrap for n < kSSE32 when n is size_t
__m128 v4 = _mm_loadu_ps(vin);
__m128 ma = _mm_sqrt_ps(v4);
_mm_storeu_ps(vin, ma);
vin += kSSE32;
}
int tail = (n % kSSE32);
while (tail > 0) {
v[n - tail] = sqrt(v[n - tail]);
tail--;
}
} // nifti_sqrt()
staticx void nifti_mul(flt *v, size_t n, flt slope1) {
flt *vin = v;
__m128 slope = _mm_set1_ps(slope1);
//#pragma omp parallel for
for (int64_t i = 0; i <= ((int64_t)n - kSSE32); i += kSSE32) { //signed arithmetic: (n - kSSE32) would wrap for n < kSSE32 when n is size_t
__m128 v4 = _mm_loadu_ps(vin);
__m128 m = _mm_mul_ps(v4, slope);
_mm_storeu_ps(vin, m);
vin += kSSE32;
}
int tail = (n % kSSE32);
while (tail > 0) {
v[n - tail] *= slope1;
tail--;
}
} //nifti_mul()
staticx void nifti_add(flt *v, int64_t n, flt intercept1) {
//add, out = in + intercept
if (intercept1 == 0.0f)
return;
flt *vin = v;
__m128 intercept = _mm_set1_ps(intercept1);
//#pragma omp parallel for
for (int64_t i = 0; i <= (n - kSSE32); i += kSSE32) {
__m128 v4 = _mm_loadu_ps(vin);
__m128 ma = _mm_add_ps(v4, intercept);
_mm_storeu_ps(vin, ma);
vin += kSSE32;
}
int tail = (n % kSSE32);
while (tail > 0) {
v[n - tail] = v[n - tail] + intercept1;
tail--;
}
} //nifti_add()
staticx void nifti_fma(flt *v, int64_t n, flt slope1, flt intercept1) {
//multiply+add, out = in * slope + intercept
if ((slope1 == 1.0f) && (intercept1 == 0.0f))
return;
flt *vin = v;
__m128 intercept = _mm_set1_ps(intercept1);
__m128 slope = _mm_set1_ps(slope1);
//#pragma omp parallel for
for (int64_t i = 0; i <= (n - kSSE32); i += kSSE32) {
__m128 v4 = _mm_loadu_ps(vin);
__m128 m = _mm_mul_ps(v4, slope);
__m128 ma = _mm_add_ps(m, intercept);
_mm_storeu_ps(vin, ma);
vin += kSSE32;
}
int tail = (n % kSSE32);
while (tail > 0) {
v[n - tail] = (v[n - tail] * slope1) + intercept1;
tail--;
}
} //nifti_fma()
#else //if SIMD32 else SIMD64
staticx void nifti_sqrt(flt *v, size_t n) {
flt *vin = v;
//#pragma omp parallel for
for (int64_t i = 0; i <= ((int64_t)n - kSSE64); i += kSSE64) { //signed arithmetic: (n - kSSE64) would wrap for n < kSSE64 when n is size_t
__m128d v2 = _mm_loadu_pd(vin);
__m128d ma = _mm_sqrt_pd(v2);
_mm_storeu_pd(vin, ma);
vin += kSSE64;
}
int tail = (n % kSSE64);
while (tail > 0) {
v[n - tail] = sqrt(v[n - tail]);
tail--;
}
} // nifti_sqrt()
staticx void nifti_mul(flt *v, size_t n, flt slope1) {
flt *vin = v;
__m128d slope = _mm_set1_pd(slope1);
//#pragma omp parallel for
for (int64_t i = 0; i <= ((int64_t)n - kSSE64); i += kSSE64) { //signed arithmetic: (n - kSSE64) would wrap for n < kSSE64 when n is size_t
__m128d v2 = _mm_loadu_pd(vin);
__m128d m = _mm_mul_pd(v2, slope);
_mm_storeu_pd(vin, m);
vin += kSSE64;
}
int tail = (n % kSSE64);
while (tail > 0) {
v[n - tail] *= slope1;
tail--;
}
} //nifti_mul()
staticx void nifti_add(flt *v, int64_t n, flt intercept1) {
//add, out = in + intercept
if (intercept1 == 0.0f)
return;
flt *vin = v;
__m128d intercept = _mm_set1_pd(intercept1);
//#pragma omp parallel for
for (int64_t i = 0; i <= (n - kSSE64); i += kSSE64) {
__m128d v2 = _mm_loadu_pd(vin);
__m128d ma = _mm_add_pd(v2, intercept);
_mm_storeu_pd(vin, ma);
vin += kSSE64;
}
int tail = (n % kSSE64);
while (tail > 0) {
v[n - tail] = v[n - tail] + intercept1;
tail--;
}
} //nifti_add()
staticx void nifti_fma(flt *v, int64_t n, flt slope1, flt intercept1) {
//multiply+add, out = in * slope + intercept
if ((slope1 == 1.0f) && (intercept1 == 0.0f))
return;
flt *vin = v;
__m128d intercept = _mm_set1_pd(intercept1);
__m128d slope = _mm_set1_pd(slope1);
//#pragma omp parallel for
for (int64_t i = 0; i <= (n - kSSE64); i += kSSE64) {
__m128d v2 = _mm_loadu_pd(vin);
__m128d m = _mm_mul_pd(v2, slope);
__m128d ma = _mm_add_pd(m, intercept);
_mm_storeu_pd(vin, ma);
vin += kSSE64;
}
int tail = (n % kSSE64);
while (tail > 0) {
v[n - tail] = (v[n - tail] * slope1) + intercept1;
tail--;
}
} //nifti_fma()
#endif //end SIMD64
#else //if SIMD vectorized, else scalar
staticx void nifti_sqrt(flt *v, size_t n) {
//#pragma omp parallel for
for (size_t i = 0; i < n; i++)
v[i] = sqrt(v[i]);
} //nifti_sqrt()
staticx void nifti_mul(flt *v, size_t n, flt slope1) {
//#pragma omp parallel for
for (size_t i = 0; i < n; i++)
v[i] *= slope1;
} //nifti_mul()
staticx void nifti_add(flt *v, size_t n, flt intercept1) {
//#pragma omp parallel for
for (size_t i = 0; i < n; i++)
v[i] += intercept1;
} //nifti_add()
staticx void nifti_fma(flt *v, size_t n, flt slope1, flt intercept1) {
//#pragma omp parallel for
for (size_t i = 0; i < n; i++)
v[i] = (v[i] * slope1) + intercept1;
} //nifti_fma
#endif //if vector SIMD else scalar
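/* A minimal usage sketch, assuming a DT_CALC image as elsewhere in this file:
   the scalar and SIMD helpers above are interchangeable, so applying the NIfTI
   header scaling is one nifti_fma() call. EXAMPLE_USAGE is an assumed macro. */
#ifdef EXAMPLE_USAGE
staticx void example_apply_scl(nifti_image *nim) {
	//out = in * scl_slope + scl_inter; nifti_fma() short-circuits for slope 1, intercept 0
	nifti_fma((flt *)nim->data, nim->nvox, (flt)nim->scl_slope, (flt)nim->scl_inter);
	nim->scl_slope = 1.0;
	nim->scl_inter = 0.0;
}
#endif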
staticx flt vx(flt *f, int p, int q) {
flt ret = ((f[q] + q * q) - (f[p] + p * p)) / (2.0 * q - 2.0 * p);
if (isnan(ret))
ret = INFINITY;
return ret;
}
staticx inline void transposeXY( flt *img3Din, flt *img3Dout, int *nxp, int *nyp, int nz) {
//transpose X and Y dimensions: rows <-> columns
//Note: in future we could use SIMD to transpose values in tiles
// https://stackoverflow.com/questions/16737298/what-is-the-fastest-way-to-transpose-a-matrix-in-c
int nx = *nxp;
int ny = *nyp;
size_t vi = 0; //volume offset
for (int z = 0; z < nz; z++) {
int zo = z * nx * ny;
for (int y = 0; y < ny; y++) {
int xo = 0;
for (int x = 0; x < nx; x++) {
img3Dout[zo + xo + y] = img3Din[vi];
xo += ny;
vi += 1;
}
}
}
*nxp = ny;
*nyp = nx;
}
staticx inline void transposeXZ( flt *img3Din, flt *img3Dout, int *nxp, int ny, int *nzp) {
//transpose X and Z dimensions: slices <-> columns
int nx = *nxp;
int nz = *nzp;
int nyz = ny * nz;
size_t vi = 0; //volume offset
for (int z = 0; z < nz; z++) {
for (int y = 0; y < ny; y++) {
int yo = y * nz;
int zo = 0;
for (int x = 0; x < nx; x++) {
img3Dout[z + yo + zo] = img3Din[vi];
zo += nyz;
vi += 1;
}
}
}
*nxp = nz;
*nzp = nx;
}
staticx void edt(flt *f, int n) {
int q, p, k;
flt s, dx;
flt *d = (flt *)_mm_malloc((n+2) * sizeof(flt), 64);
flt *z = (flt *)_mm_malloc((n+2) * sizeof(flt), 64);
int *v = (int *)_mm_malloc((n+2) * sizeof(int), 64);
/*# Find the lower envelope of a sequence of parabolas.
# f...source data (returns the Y of the parabola vertex at X)
# d...destination data (final distance values are written here)
# z...temporary used to store X coords of parabola intersections
# v...temporary used to store X coords of parabola vertices
# i...resulting X coords of parabola vertices
# n...number of pixels in "f" to process
# Always add the first pixel to the enveloping set since it is
# obviously lower than all parabolas processed so far.*/
k = 0;
v[0] = 0;
z[0] = -INFINITY;
z[1] = INFINITY;
for (q = 1; q < n; q++) {
/* If the new parabola is lower than the right-most parabola in
# the envelope, remove it from the envelope. To make this
# determination, find the X coordinate of the intersection (s)
# between the parabolas with vertices at (q,f[q]) and (p,f[p]).*/
p = v[k];
s = vx(f, p, q);
//while (s <= z[k]) {
while ((s <= z[k]) && (k > 0)) {
k = k - 1;
p = v[k];
s = vx(f, p, q);
}
//# Add the new parabola to the envelope.
k = k + 1;
v[k] = q;
z[k] = s;
z[k + 1] = INFINITY;
}
/*# Go back through the parabolas in the envelope and evaluate them
# in order to populate the distance values at each X coordinate.*/
k = 0;
for (q = 0; q < n; q++) {
while (z[k + 1] < q)
k = k + 1;
dx = (q - v[k]);
d[q] = dx * dx + f[v[k]];
}
for (q = 0; q < n; q++)
f[q] = d[q];
_mm_free(d);
_mm_free(z);
_mm_free(v);
} //edt()
staticx void edt1(flt *df, int n) { //first dimension is simple
int q, prevX;
flt prevY, v;
prevX = 0;
prevY = INFINITY;
//forward
for (q = 0; q < n; q++) {
if (df[q] == 0) {
prevX = q;
prevY = 0;
} else
df[q] = sqr(q - prevX) + prevY;
}
//reverse
prevX = n;
prevY = INFINITY;
for (q = (n - 1); q >= 0; q--) {
v = sqr(q - prevX) + prevY;
if (df[q] < v) {
prevX = q;
prevY = df[q];
} else
df[q] = v;
}
} //edt1()
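/* Worked example (our illustration): for a row pre-processed so object voxels
   are 0 and background is INFINITY, e.g. {0, INF, INF, INF, 0}, edt1() leaves
   squared distances {0, 1, 4, 1, 0}: the forward pass writes {0, 1, 4, 9, 0}
   and the reverse pass lowers index 3 from 9 to 1. */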
staticx int nifti_edt(nifti_image *nim) {
//https://github.com/neurolabusc/DistanceFields
if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
return 1;
if (nim->datatype != DT_CALC)
return 1;
flt *img = (flt *)nim->data;
int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
int nVol = nim->nvox / nvox3D;
if ((nVol < 1) || ((nvox3D * nVol) != nim->nvox))
return 1;
int nx = nim->nx;
int ny = nim->ny;
int nz = nim->nz;
flt threshold = 0.0;
for (size_t i = 0; i < nim->nvox; i++) {
if (img[i] > threshold)
img[i] = INFINITY;
else
img[i] = 0;
}
size_t nRow = MAX(nim->ny, 1); //number of x-direction rows: ny * nz * nVol (nx is the row length, not a row count)
nRow *= MAX(nim->nz, 1);
nRow *= MAX(nVol, 1);
//EDT in left-right direction
flt *imgRow = img;
for (int r = 0; r < nRow; r++) {
	edt1(imgRow, nx); //pre-incrementing the pointer in the call would skip row 0 and overrun the end
	imgRow += nx;
}
//EDT in anterior-posterior direction
nRow = nim->nx * nim->nz; //transpose XYZ to YXZ and blur Y columns with XZ Rows
for (int v = 0; v < nVol; v++) { //transpose each volume separately
	int nxv = nx, nyv = ny; //per-volume copies: transposeXY swaps the dims it is handed
	flt *img3D = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //alloc for each volume to allow openmp
	size_t vo = v * nvox3D; //volume offset
	transposeXY(&img[vo], img3D, &nxv, &nyv, nz);
	//perform EDT for all "rows"
	flt *imgRow = img3D;
	for (int r = 0; r < nRow; r++) {
		edt(imgRow, nxv); //row length is nxv (the former ny)
		imgRow += nxv;
	}
	transposeXY(img3D, &img[vo], &nxv, &nyv, nz);
	_mm_free(img3D);
} //for each volume
//EDT in head-foot direction
nRow = nim->nx * nim->ny; //transpose XYZ to ZXY and blur Z columns with XY Rows
#pragma omp parallel for
for (int v = 0; v < nVol; v++) { //transpose each volume separately
	int nxv = nx, nzv = nz; //per-volume copies: shared dims must not be swapped concurrently under OpenMP
	flt *img3D = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //alloc for each volume to allow openmp
	size_t vo = v * nvox3D; //volume offset
	transposeXZ(&img[vo], img3D, &nxv, ny, &nzv);
	//perform EDT for all "rows"
	flt *imgRow = img3D;
	for (int r = 0; r < nRow; r++) {
		edt(imgRow, nxv); //row length is nxv (the former nz)
		imgRow += nxv;
	}
	transposeXZ(img3D, &img[vo], &nxv, ny, &nzv);
	_mm_free(img3D);
} //for each volume
return 0;
} //nifti_edt()
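/* A minimal usage sketch (assumption: the caller wants Euclidean rather than
   squared distance): nifti_edt() leaves squared distances, so follow it with
   nifti_sqrt(). EXAMPLE_USAGE is an assumed macro. */
#ifdef EXAMPLE_USAGE
staticx int example_distance_map(nifti_image *nim) {
	if (nifti_edt(nim) != 0) //bright voxels (> 0) receive squared distance to the nearest dark voxel
		return 1;
	nifti_sqrt((flt *)nim->data, nim->nvox); //squared -> Euclidean distance
	return 0;
}
#endif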
//kernelWid influences width of kernel, use negative values for round, positive for ceil
// kernelWid of 2.5 means the kernel will be (2 * ceil(2.5 * sigma))+1 voxels wide
// kernelWid of -6.0 means the kernel will be (2 * round(6.0 * sigma))+1 voxels wide
// 2.5 AFNI ceil(2.5) https://github.com/afni/afni/blob/25e77d564f2c67ff480fa99a7b8e48ec2d9a89fc/src/edt_blur.c#L1391
// -6 SPM round(6) https://github.com/spm/spm12/blob/3085dac00ac804adb190a7e82c6ef11866c8af02/spm_smooth.m#L97
// -6 FSL round(6) (estimated)
// -3 opencv round(3) or round(4) depending on datatype https://github.com/opencv/opencv/blob/9c23f2f1a682faa9f0b2c2223a857c7d93ba65a6/modules/imgproc/src/smooth.cpp#L3782
//bioimagesuite floor(1.5) https://github.com/bioimagesuiteweb/bisweb/blob/210d678c92fd404287fe5766136379ec94750eb2/js/utilities/bis_imagesmoothreslice.js#L133
//Gaussian blur, both serial and parallel variants, https://github.com/neurolabusc/niiSmooth
staticx void blurS(flt *img, int nx, int ny, flt xmm, flt Sigmamm, flt kernelWid) {
//serial blur
//make kernels
if ((xmm == 0) || (nx < 2) || (ny < 1) || (Sigmamm <= 0.0))
return;
//flt sigma = (FWHMmm/xmm)/sqrt(8*log(2));
flt sigma = (Sigmamm / xmm); //mm to vox
//round(6*sigma), ceil(4*sigma) seems spot on larger than fslmaths
//int cutoffvox = round(6*sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
int cutoffvox;
if (kernelWid < 0)
cutoffvox = round(fabs(kernelWid) * sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
else
cutoffvox = ceil(kernelWid * sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
//printf(".Blur Cutoff (%g) %d\n", 4*sigma, cutoffvox);
//validated on SPM12's 1.5mm isotropic mask_ICV.nii (discrete jump in number of non-zero voxels)
//fslmaths mask -s 2.26 f6.nii //Blur Cutoff (6.02667) 7
//fslmaths mask -s 2.24 f4.nii //Blur Cutoff (5.97333) 6
cutoffvox = MAX(cutoffvox, 1);
flt *k = (flt *)_mm_malloc((cutoffvox + 1) * sizeof(flt), 64); //FIR Gaussian
flt expd = 2 * sigma * sigma;
for (int i = 0; i <= cutoffvox; i++)
k[i] = exp(-1.0f * (i * i) / expd);
//calculate start, end for each voxel in
int *kStart = (int *)_mm_malloc(nx * sizeof(int), 64); //-cutoffvox except leftmost columns, e.g. 0, -1, -2 ... -cutoffvox
int *kEnd = (int *)_mm_malloc(nx * sizeof(int), 64); //+cutoff except right columns
flt *kWeight = (flt *)_mm_malloc(nx * sizeof(flt), 64); //ensure sum of kernel = 1.0
for (int i = 0; i < nx; i++) {
kStart[i] = MAX(-cutoffvox, -i); //do not read below 0
kEnd[i] = MIN(cutoffvox, nx - i - 1); //do not read beyond final column
if ((i > 0) && (kStart[i] == (kStart[i - 1])) && (kEnd[i] == (kEnd[i - 1]))) { //reuse weight
kWeight[i] = kWeight[i - 1];
continue;
}
flt wt = 0.0f;
for (int j = kStart[i]; j <= kEnd[i]; j++)
wt += k[abs(j)];
kWeight[i] = 1 / wt;
//printf("%d %d->%d %g\n", i, kStart[i], kEnd[i], kWeight[i]);
}
//apply kernel to each row
flt *tmp = _mm_malloc(nx * sizeof(flt), 64); //input values prior to blur
for (int y = 0; y < ny; y++) {
//printf("-+ %d:%d\n", y, ny);
xmemcpy(tmp, img, nx * sizeof(flt));
for (int x = 0; x < nx; x++) {
flt sum = 0;
for (int i = kStart[x]; i <= kEnd[x]; i++)
sum += tmp[x + i] * k[abs(i)];
img[x] = sum * kWeight[x];
}
img += nx;
} //blurX
//free kernel
_mm_free(tmp);
_mm_free(k);
_mm_free(kStart);
_mm_free(kEnd);
_mm_free(kWeight);
} //blurS()
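// Reminder (standard Gaussian identity, noted here because the argument is a
// sigma, not a FWHM): FWHM = sigma * sqrt(8*ln(2)) ~= 2.3548 * sigma,
// so a 6 mm FWHM kernel corresponds to Sigmamm ~= 2.548 mm.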
#if defined(_OPENMP)
staticx void blurP(flt *img, int nx, int ny, flt xmm, flt Sigmamm, flt kernelWid) {
	//parallel blur: the argument is a sigma in mm, as in blurS(), not a FWHM
	//make kernels
	if ((xmm == 0) || (nx < 2) || (ny < 1) || (Sigmamm <= 0.0))
		return;
	//for a FWHM argument one would use: sigma = (FWHMmm/xmm)/sqrt(8*log(2))
	flt sigma = (Sigmamm / xmm); //mm to vox
int cutoffvox;
if (kernelWid < 0)
cutoffvox = round(fabs(kernelWid) * sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
else
cutoffvox = ceil(kernelWid * sigma); //filter width to 6 sigma: faster but lower precision AFNI_BLUR_FIRFAC = 2.5
cutoffvox = MAX(cutoffvox, 1);
flt *k = (flt *)_mm_malloc((cutoffvox + 1) * sizeof(flt), 64); //FIR Gaussian
flt expd = 2 * sigma * sigma;
for (int i = 0; i <= cutoffvox; i++)
k[i] = exp(-1.0f * (i * i) / expd);
//calculate start, end for each voxel in
int *kStart = (int *)_mm_malloc(nx * sizeof(int), 64); //-cutoffvox except leftmost columns, e.g. 0, -1, -2 ... -cutoffvox
int *kEnd = (int *)_mm_malloc(nx * sizeof(int), 64); //+cutoff except right columns
flt *kWeight = (flt *)_mm_malloc(nx * sizeof(flt), 64); //ensure sum of kernel = 1.0
for (int i = 0; i < nx; i++) {
kStart[i] = MAX(-cutoffvox, -i); //do not read below 0
kEnd[i] = MIN(cutoffvox, nx - i - 1); //do not read beyond final column
if ((i > 0) && (kStart[i] == (kStart[i - 1])) && (kEnd[i] == (kEnd[i - 1]))) { //reuse weight
kWeight[i] = kWeight[i - 1];
continue;
}
flt wt = 0.0f;
for (int j = kStart[i]; j <= kEnd[i]; j++)
wt += k[abs(j)];
kWeight[i] = 1 / wt;
//printf("%d %d->%d %g\n", i, kStart[i], kEnd[i], kWeight[i]);
}
//apply kernel to each row
#pragma omp parallel for
for (int y = 0; y < ny; y++) {
flt *tmp = _mm_malloc(nx * sizeof(flt), 64); //input values prior to blur
flt *imgx = img;
imgx += (nx * y);
xmemcpy(tmp, imgx, nx * sizeof(flt));
for (int x = 0; x < nx; x++) {
flt sum = 0;
for (int i = kStart[x]; i <= kEnd[x]; i++)
sum += tmp[x + i] * k[abs(i)];
imgx[x] = sum * kWeight[x];
}
_mm_free(tmp);
}
//free kernel
_mm_free(k);
_mm_free(kStart);
_mm_free(kEnd);
_mm_free(kWeight);
} //blurP
#endif // if OPENMP: blurP (parallel blur) is multi-threaded
staticx int nifti_smooth_gauss(nifti_image *nim, flt SigmammX, flt SigmammY, flt SigmammZ, flt kernelWid) {
//https://github.com/afni/afni/blob/699775eba3c58c816d13947b81cf3a800cec606f/src/edt_blur.c
int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
if ((nvox3D < 2) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1) || (nim->datatype != DT_CALC)) {
	printfx("Image too small (or wrong datatype) for Gaussian blur.\n");
return 1;
}
if ((SigmammX == 0) && (SigmammY == 0) && (SigmammZ == 0))
return 0; //all done: no smoothing, e.g. small kernel for difference of Gaussian
if (SigmammX < 0) //negative values for voxels, not mm
SigmammX = -SigmammX * nim->dx;
if (SigmammY < 0) //negative values for voxels, not mm
SigmammY = -SigmammY * nim->dy;
if (SigmammZ < 0) //negative values for voxels, not mm
SigmammZ = -SigmammZ * nim->dz;
flt *img = (flt *)nim->data;
int nVol = nim->nvox / nvox3D;
if ((nVol < 1) || ((nvox3D * nVol) != nim->nvox))
return 1;
int nx = nim->nx;
int ny = nim->ny;
int nz = nim->nz;
if ((SigmammX <= 0.0) || (nx < 2))
goto DO_Y_BLUR;
//BLUR X
size_t nRow = MAX(nim->ny, 1);
nRow *= MAX(nim->nz, 1);
nRow *= MAX(nVol, 1);
#if defined(_OPENMP)
if (omp_get_max_threads() > 1)
blurP(img, nim->nx, nRow, nim->dx, SigmammX, kernelWid);
else
blurS(img, nim->nx, nRow, nim->dx, SigmammX, kernelWid);
#else
blurS(img, nim->nx, nRow, nim->dx, SigmammX, kernelWid);
#endif
DO_Y_BLUR:
//BLUR Y
if ((SigmammY <= 0.0) || (ny < 2))
goto DO_Z_BLUR;
nRow = nim->nx * nim->nz; //transpose XYZ to YXZ and blur Y columns with XZ Rows
#pragma omp parallel for
for (int v = 0; v < nVol; v++) { //transpose each volume separately
	int nxv = nx, nyv = ny; //per-volume copies: transposeXY swaps the dims, which must not race across threads
	flt *img3D = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //alloc for each volume to allow openmp
	size_t vo = v * nvox3D; //volume offset
	transposeXY(&img[vo], img3D, &nxv, &nyv, nz);
	blurS(img3D, nim->ny, nRow, nim->dy, SigmammY, kernelWid);
	transposeXY(img3D, &img[vo], &nxv, &nyv, nz);
	_mm_free(img3D);
} //for each volume
DO_Z_BLUR:
//BLUR Z:
if ((SigmammZ <= 0.0) || (nim->nz < 2))
return 0; //all done!
nRow = nim->nx * nim->ny; //transpose XYZ to ZXY and blur Z columns with XY Rows
#pragma omp parallel for
for (int v = 0; v < nVol; v++) { //transpose each volume separately
	//printf("volume %d uses thread %d\n", v, omp_get_thread_num());
	int nxv = nx, nzv = nz; //per-volume copies: transposeXZ swaps the dims, which must not race across threads
	flt *img3D = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //alloc for each volume to allow openmp
	size_t vo = v * nvox3D; //volume offset
	transposeXZ(&img[vo], img3D, &nxv, ny, &nzv);
	blurS(img3D, nim->nz, nRow, nim->dz, SigmammZ, kernelWid);
	transposeXZ(img3D, &img[vo], &nxv, ny, &nzv);
	_mm_free(img3D);
} //for each volume
return 0;
} // nifti_smooth_gauss()
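/* A minimal usage sketch, mirroring fslmaths' "-s <sigma>" (an assumption about
   intended use): isotropic smoothing with sigma = 2.26 mm and the SPM-style
   round(6*sigma) kernel width noted in blurS(). EXAMPLE_USAGE is an assumed macro. */
#ifdef EXAMPLE_USAGE
staticx int example_smooth(nifti_image *nim) {
	flt sigma = 2.26; //in mm; negative values would be interpreted as voxels
	return nifti_smooth_gauss(nim, sigma, sigma, sigma, -6.0);
}
#endif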
staticx int nifti_robust_range(nifti_image *nim, flt *pct2, flt *pct98, int ignoreZeroVoxels) {
//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;31f309c1.1307
// robust range is essentially the 2nd and 98th percentiles
// "but ensuring that the majority of the intensity range is captured, even for binary images."
// fsl uses 1000 bins, also limits for volumes less than 100 voxels taylor.hanayik@ndcn.ox.ac.uk 20190107
//fslstats trick -r
// 0.000000 1129.141968
//niimath >fslstats trick -R
// 0.000000 2734.000000
*pct2 = 0.0;
*pct98 = 1.0;
if (nim->nvox < 1)
return 1;
if (nim->datatype != DT_CALC)
return 1;
flt *f32 = (flt *)nim->data;
flt mn = INFINITY;
flt mx = -INFINITY;
size_t nZero = 0;
size_t nNan = 0;
for (size_t i = 0; i < nim->nvox; i++) {
if (isnan(f32[i])) {
nNan++;
continue;
}
if (f32[i] == 0.0) {
nZero++;
if (ignoreZeroVoxels)
continue;
}
mn = fmin(f32[i], mn);
mx = fmax(f32[i], mx);
}
if ((nZero > 0) && (mn > 0.0) && (!ignoreZeroVoxels))
mn = 0.0;
if (mn > mx)
return 0; //all NaN
if (mn == mx) {
*pct2 = mn;
*pct98 = mx;
return 0;
}
if (!ignoreZeroVoxels)
nZero = 0;
nZero += nNan;
size_t n2pct = round((nim->nvox - nZero) * 0.02);
if ((n2pct < 1) || (mn == mx) || ((nim->nvox - nZero) < 100)) { //T Hanayik mentioned issue with very small volumes
*pct2 = mn;
*pct98 = mx;
return 0;
}
#define nBins 1001
flt scl = (nBins - 1) / (mx - mn);
int hist[nBins];
for (int i = 0; i < nBins; i++)
hist[i] = 0;
if (ignoreZeroVoxels) {
for (int i = 0; i < nim->nvox; i++) {
if (isnan(f32[i]))
continue;
if (f32[i] == 0.0)
continue;
hist[(int)round((f32[i] - mn) * scl)]++;
}
} else {
for (int i = 0; i < nim->nvox; i++) {
if (isnan(f32[i]))
continue;
hist[(int)round((f32[i] - mn) * scl)]++;
}
}
size_t n = 0;
size_t lo = 0;
while (n < n2pct) {
n += hist[lo];
//if (lo < 10)
// printf("%zu %zu %zu %d\n",lo, n, n2pct, ignoreZeroVoxels);
lo++;
}
lo--; //remove final increment
n = 0;
int hi = nBins;
while (n < n2pct) {
hi--;
n += hist[hi];
}
/*if ((lo+1) < hi) {
size_t nGray = 0;
for (int i = lo+1; i < hi; i++ ) {
nGray += hist[i];
//printf("%d %d\n", i, hist[i]);
}
float fracGray = (float)nGray/(float)(nim->nvox - nZero);
printf("histogram[%d..%d] = %zu %g\n", lo, hi, nGray, fracGray);
}*/
if (lo == hi) { //MAJORITY are not black or white
int ok = -1;
while (ok != 0) {
if (lo > 0) {
lo--;
if (hist[lo] > 0)
ok = 0;
}
if ((ok != 0) && (hi < (nBins - 1))) {
hi++;
if (hist[hi] > 0)
ok = 0;
}
if ((lo == 0) && (hi == (nBins - 1)))
ok = 0;
} //while not ok
}//if lo == hi
*pct2 = (lo) / scl + mn;
*pct98 = (hi) / scl + mn;
//printf("full range %g..%g (voxels 0 or NaN =%zu) robust range %g..%g\n", mn, mx, nZero, *pct2, *pct98);
return 0;
}
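/* A minimal usage sketch: report the robust range the way "fslstats -r" does
   (the comparison quoted in the comments above). EXAMPLE_USAGE is an assumed macro. */
#ifdef EXAMPLE_USAGE
staticx void example_robust_range(nifti_image *nim) {
	flt lo, hi;
	if (nifti_robust_range(nim, &lo, &hi, 0) == 0) //0: zero voxels count toward the percentiles
		printfx("robust range %g %g\n", (double)lo, (double)hi);
}
#endif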
staticx flt* padImg3D( flt *imgIn, int *nx, int *ny, int *nz) {
//create an image with new first and last columns, rows, slices
int nxIn = (* nx);
int nxOut = (* nx) + 2;
int nyOut = (* ny) + 2;
int nzOut = (* nz) + 2;
int nvox3D = nxOut * nyOut * nzOut;
flt *imgOut= (flt *)_mm_malloc(nvox3D * sizeof(flt), 64);
memset(imgOut, 0, nvox3D * sizeof(flt)); //zero array
flt *imgOutP = imgOut;
flt *imgInP = imgIn;
imgOutP += 1;
for (int z = 0; z < nzOut; z++)
for (int y = 0; y < nyOut; y++) {
if ((z > 0) && (y > 0) && (z < (nzOut - 1)) && (y < (nyOut - 1))) {
xmemcpy(imgOutP, imgInP, nxIn * sizeof(flt)); //dest, src, count
imgInP += nxIn;
}
imgOutP += nxOut;
}
* nx = nxOut;
* ny = nyOut;
* nz = nzOut;
return imgOut;
}
staticx int nifti_binarize(nifti_image *nim, flt threshold) { //binarize image at the given threshold (Otsu threshold selection lives in nifti_otsu)
if (nim->nvox < 1)
return 1;
flt *inimg = (flt *)nim->data;
for (int i = 0; i < nim->nvox; i++) {
if (isnan(inimg[i]))
continue;
inimg[i] = (inimg[i] < threshold) ? 0.0 : 1.0;
}
nim->scl_inter = 0.0;
nim->scl_slope = 1.0;
nim->cal_min = 0.0;
nim->cal_max = 1.0;
return 0;
}
staticx flt brightest_voxel(nifti_image *nim) {
if (nim->nvox < 1)
return 0.0;
flt *img = (flt *)nim->data;
flt mx = -INFINITY; //in case 1st voxel is NaN
for (int i = 0; i < nim->nvox; i++) {
if (isnan(img[i])) continue;
mx = MAX(mx, img[i]);
}
return mx;
}
staticx flt darkest_voxel(nifti_image *nim) {
if (nim->nvox < 1)
return 0.0;
flt *img = (flt *)nim->data;
flt mn = INFINITY; //in case 1st voxel is NaN
for (int i = 0; i < nim->nvox; i++) {
if (isnan(img[i])) continue;
mn = MIN(mn, img[i]);
}
return mn;
}
staticx int nifti_mask_below(nifti_image *nim, flt threshold, int isZeroFill) {
//if isZeroFill set dark voxels to zero
//else set dark voxels to darkest
if (nim->nvox < 1)
return 1;
flt *inimg = (flt *)nim->data;
flt fill = 0.0;
if (!isZeroFill)
fill = darkest_voxel(nim);
for (int i = 0; i < nim->nvox; i++) {
	if ((isnan(inimg[i])) || (inimg[i] >= threshold))
		continue;
	inimg[i] = fill; //zero, or the global darkest value when !isZeroFill
}
return 0;
}
staticx int nifti_mask_below_dilate(nifti_image *nim, flt threshold, int isZeroFill) {
//mask dark voxels to zero ONLY if surrounded by other dark voxels
// this 'feathers' the edges of bright objects, capturing partial volumes
// isZeroFill determines if masked voxels are set to zero or the global darkest value
if (nim->nvox < 1)
return 1;
if ((nim->nx < 3) || (nim->ny < 3) || (nim->nz < 3))
return nifti_mask_below(nim, threshold, isZeroFill);
flt *inimg = (flt *)nim->data;
uint8_t *vxs = (uint8_t *)_mm_malloc(nim->nvox * sizeof(uint8_t), 64);
memset(vxs, 0, nim->nvox * sizeof(uint8_t));
for (int i = 0; i < nim->nvox; i++) {
if ((isnan(inimg[i])) || (inimg[i] >= threshold))
vxs[i] = 1;
}
size_t nx = nim->nx;
size_t nxy = nx * nim->ny;
size_t nvox3D = nxy * MAX(nim->nz, 1);
size_t nVol = nim->nvox / nvox3D;
for (int v = 0; v < nVol; v++) {
uint8_t *vxs2 = (uint8_t *)_mm_malloc(nvox3D * sizeof(uint8_t), 64);
uint8_t *tmp = vxs + (v * nvox3D);
xmemcpy(vxs2, tmp, nvox3D * sizeof(uint8_t)); //dest,src,bytes
size_t iv = (v * nvox3D);
for (int z = 1; z < (nim->nz - 1); z++) {
for (int y = 1; y < (nim->ny - 1); y++) {
size_t iyz = + (z * nxy) + (y * nim->nx);
for (int x = 1; x < (nx - 1); x++) {
size_t vx = iyz + x;
if (vxs[vx + iv] == 1) continue;
if ((vxs2[vx -1] == 1) || (vxs2[vx + 1] == 1)
|| (vxs2[vx - nx] == 1) || (vxs2[vx + nx] == 1)
|| (vxs2[vx - nxy] == 1) || (vxs2[vx + nxy] == 1))
vxs[vx + iv] = 1; //index the full 4D mask: without +iv only volume 0 would be updated
} //x
} //y
} //z
} //v
flt fill = 0.0;
if (!isZeroFill)
fill = darkest_voxel(nim);
for (size_t i = 0; i < nim->nvox; i++) {
if (vxs[i] == 0)
inimg[i] = fill;
}
_mm_free(vxs);
return 0;
}
staticx int nifti_c2h(nifti_image *nim) {
//c2h: Cormack to Hounsfield
// https://github.com/neurolabusc/Clinical/blob/master/clinical_c2h.m
flt kUninterestingDarkUnits = 900.0; //e.g. -1000..-100
flt kInterestingMidUnits = 200.0; //e.g. unenhanced CT: -100..+100
flt kScaleRatio = 10;
flt kMax = kInterestingMidUnits * (kScaleRatio+1);
if (nim->nvox < 1)
return 1;
flt mn = darkest_voxel(nim);
if (mn < 0.0) {
printfx("Negative brightnesses impossible in the Cormack scale.\n");
return 1;
}
flt *img = (flt *)nim->data;
for (int i = 0; i < nim->nvox; i++) {
if (isnan(img[i]))
continue;
flt boost = img[i] - kUninterestingDarkUnits;
boost = MAX(boost, 0.0);
boost = MIN(boost, kInterestingMidUnits * kScaleRatio);
boost = boost * ((kScaleRatio - 1.0) / kScaleRatio);
img[i] = img[i] - boost - 1024.0;
}
return 0;
} // nifti_c2h()
staticx int nifti_h2c(nifti_image *nim) {
//h2c: Hounsfield to Cormack
// https://github.com/neurolabusc/Clinical/blob/master/clinical_h2c.m
flt kUninterestingDarkUnits = 900.0; //e.g. -1000..-100
flt kInterestingMidUnits = 200.0; //e.g. unenhanced CT: -100..+100
flt kMin = -1024.0; //some GE scanners place artificial rim around air
flt kScaleRatio = 10;
if (nim->nvox < 1)
return 1;
flt mn = darkest_voxel(nim);
flt mx = brightest_voxel(nim);
if ((mx < 100) || (mn > -500)) {
printfx("Image not in Hounsfield units: Intensity range %g..%g\n", mn, mx);
return 1;
}
flt *img = (flt *)nim->data;
if (mn < kMin) {//some GE scanners place artificial rim around air
for (int i = 0; i < nim->nvox; i++) {
if ((isnan(img[i])) || (img[i] >= kMin))
continue;
img[i] = kMin;
}
mn = kMin;
}
for (int i = 0; i < nim->nvox; i++) {
if (isnan(img[i]))
continue;
img[i] -= mn; //translate so min value is 0
flt boost = img[i] - kUninterestingDarkUnits;
boost = MAX(boost, 0.0);
boost = MIN(boost, kInterestingMidUnits);
boost = boost * (kScaleRatio - 1.0);
img[i] += boost;
}
return 0;
} // nifti_h2c()
staticx int nifti_otsu(nifti_image *nim, int mode, int makeBinary) { //binarize image using Otsu's method
//mode is 1..5 corresponding to 3/4, 2/3, 1/2, 1/3 and 1/4 compartments made dark
//makeBinary: -1 replace dark with darkest, 0 = replace dark with 0, 1 = binary (0 or 1)
if ((nim->nvox < 1) || (nim->datatype != DT_CALC))
return 1;
//Create histogram of intensity frequency
// hist[0..kOtsuBins-1]: each bin is number of pixels with this intensity
flt mn, mx;
if (nifti_robust_range(nim, &mn, &mx, 0) != 0)
return 1;
if (mn >= mx)
return 1; //no variability
#define kOtsuBins 256
flt *inimg = (flt *)nim->data;
flt scl = (kOtsuBins - 1) / (mx - mn);
//create histogram
int hist[kOtsuBins];
for (int i = 0; i < kOtsuBins; i++)
hist[i] = 0;
for (int i = 0; i < nim->nvox; i++) {
if (isnan(inimg[i]))
continue;
int idx = (int)round((inimg[i] - mn) * scl);
idx = MIN(idx, kOtsuBins - 1);
idx = MAX(idx, 0);
hist[idx]++;
}
//attenuate influence of zero intensity: zero bin clamped to most frequent non-zero bin
// int idx0 = (int)round((0.0 - mn) * scl);
// int mostFrequentNot0 = 0;
// for (int i = 0; i < kOtsuBins; i++) {
// if (i == idx0) continue;
// if (hist[i] > mostFrequentNot0) mostFrequentNot0 = hist[i];
// }
// hist[idx0] = MIN(hist[idx0], mostFrequentNot0);
//compute Otsu
int thresh = nii_otsu(hist, kOtsuBins, mode);
flt threshold = (thresh / scl) + mn;
//printf("range %g..%g Otsu threshold %g\n", mn, mx, threshold);
if (makeBinary == 1)
return nifti_binarize(nim, threshold);
return nifti_mask_below_dilate(nim, threshold, (makeBinary == 0));
} // nifti_otsu()
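/* A minimal usage sketch: binarize at the Otsu threshold; mode 3 targets the
   midpoint split described above. EXAMPLE_USAGE is an assumed macro. */
#ifdef EXAMPLE_USAGE
staticx int example_otsu_mask(nifti_image *nim) {
	return nifti_otsu(nim, 3, 1); //makeBinary = 1: output voxels are 0 or 1
}
#endif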
staticx int nifti_unsharp(nifti_image *nim, flt SigmammX, flt SigmammY, flt SigmammZ, flt amount) {
//https://github.com/afni/afni/blob/699775eba3c58c816d13947b81cf3a800cec606f/src/edt_blur.c
if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
return 1;
if (nim->datatype != DT_CALC)
return 1;
if (amount == 0.0)
return 0;
flt *inimg = (flt *)nim->data;
void *indat = (void *)nim->data;
flt mn = INFINITY; //better than inimg[0], in case the first voxel is NaN
flt mx = -INFINITY;
for (int i = 0; i < nim->nvox; i++) {
	if (isnan(inimg[i]))
		continue; //NaN would propagate through the MIN/MAX macros
	mn = MIN(mn, inimg[i]);
	mx = MAX(mx, inimg[i]);
}
if (mn >= mx)
return 0; //no variability
size_t nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
size_t nVol = nim->nvox / nvox3D;
if ((nvox3D * nVol) != nim->nvox)
return 1;
//process each 3D volume independently: reduce memory pressure
nim->nvox = nvox3D;
flt *simg = (flt *)_mm_malloc(nim->nvox * sizeof(flt), 64); //output image
memset(simg, 0, nim->nvox * sizeof(flt)); //zero array
nim->data = (void *) simg;
for (int v = 0; v < nVol; v++) {
xmemcpy(simg, inimg, nim->nvox * sizeof(flt));
nifti_smooth_gauss(nim, SigmammX, SigmammY, SigmammZ, 2.5); //2.5: a relatively narrow kernel for speed
for (int i = 0; i < nim->nvox; i++) {
//sharpened = original + (original - blurred) * amount
inimg[i] += (inimg[i] - simg[i]) * amount;
//keep in original range
inimg[i] = MAX(inimg[i], mn);
inimg[i] = MIN(inimg[i], mx);
}
inimg += nim->nvox;
}
_mm_free(simg);
//return original data
nim->data = indat;
//nim->nvox = nvox3D * nVol;
return 0;
} //nifti_unsharp()
staticx int nifti_crop(nifti_image *nim, int tmin, int tsize) {
if (tsize == 0) {
printfx("tsize must not be 0\n");
return 1;
}
if (nim->nvox < 1)
return 1;
if (nim->datatype != DT_CALC)
return 1;
int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
if ((nvox3D < 1) || ((nim->nvox % nvox3D) != 0))
return 1;
int nvol = (nim->nvox / nvox3D); //in
if (nvol < 2) {
printfx("crop only appropriate for 4D volumes");
return 1;
}
if (tmin >= nvol) {
printfx("tmin must be from 0..%d, not %d\n", nvol - 1, tmin);
return 1;
}
int tminVol = MAX(0, tmin);
int tFinalVol = tminVol + tsize - 1; //e.g. if tmin=0 and tsize=1, tFinal=0
if (tsize < 0) {
tFinalVol = INT_MAX;
}
tFinalVol = MIN(tFinalVol, nvol - 1);
if ((tminVol == 0) && (tFinalVol == (nvol - 1)))
return 0;
int nvolOut = tFinalVol - tminVol + 1;
flt *imgIn = (flt *)nim->data;
nim->nvox = nvox3D * nvolOut;
void *dat = (void *)calloc(1, nim->nvox * sizeof(flt));
flt *imgOut = (flt *)dat;
imgIn += tminVol * nvox3D;
xmemcpy(imgOut, imgIn, nim->nvox * sizeof(flt));
free(nim->data);
nim->data = dat;
if (nvolOut == 1)
nim->ndim = 3;
else
nim->ndim = 4;
//nim->dim[4] = nvolOut;
nim->nt = nvolOut;
#ifndef USING_WASM
nim->nu = 1;
nim->nv = 1;
nim->nw = 1;
#endif
return 0;
}
staticx void nifti_add2(flt *v, size_t n, flt intercept1) {
//#pragma omp parallel for
for (size_t i = 0; i < n; i++)
v[i] += intercept1;
} //nifti_add2()
staticx int nifti_rescale(nifti_image *nim, double scale, double intercept) {
//linear transform of data
if (nim->nvox < 1)
return 1;
flt scl = scale;
flt inter = intercept;
flt *f32 = (flt *)nim->data;
if (intercept == 0.0) {
if (scale == 1.0)
return 0; //nothing to do
nifti_mul(f32, nim->nvox, scl);
return 0;
} else if (scale == 1.0) {
nifti_add(f32, nim->nvox, intercept);
return 0;
}
nifti_fma(f32, nim->nvox, scl, inter);
//for (size_t i = 0; i < nim->nvox; i++ )
// f32[i] = (f32[i] * scl) + inter;
return 0;
}
#ifndef USING_WASM
staticx int nifti_tfceS(nifti_image *nim, double H, double E, int c, int x, int y, int z, double tfce_thresh) {
if (nim->nvox < 1)
return 1;
if (nim->datatype != DT_CALC)
return 1;
if ((x < 0) || (x >= nim->nx) || (y < 0) || (y >= nim->ny) || (z < 0) || (z >= nim->nz)) {
	printfx("tfceS x/y/z must be in range 0..%" PRId64 "/0..%" PRId64 "/0..%" PRId64 "\n", nim->nx - 1, nim->ny - 1, nim->nz - 1);
	return 1; //an out-of-range seed would index outside the volume below
}
if (!neg_determ(nim))
x = nim->nx - x - 1;
int seed = x + (y * nim->nx) + (z * nim->nx * nim->ny);
flt *inimg = (flt *)nim->data;
if (inimg[seed] < H) {
printfx("it doesn't reach to specified threshold\n");
return 1;
}
size_t nvox3D = nim->nx * nim->ny * nim->nz;
if (nim->nvox > nvox3D) {
printfx("tfceS not suitable for 4D data.\n");
return 1;
}
//printf("peak %g\n", inimg[seed]);
int numk = c;
if ((c != 6) && (c != 18) && (c != 26)) {
printfx("suitable values for c are 6, 18 or 26\n");
numk = 6;
}
//set up kernel to search for neighbors. Since we already included sides, we do not worry about A<->P and L<->R wrap
int32_t *k = (int32_t *)_mm_malloc(3 * numk * sizeof(int32_t), 64); //kernel: offset, x, y
int mxDx = 1; //connectivity 6: faces only
if (numk == 18)
mxDx = 2; //connectivity 18: faces+edges
if (numk == 26)
mxDx = 3; //connectivity 26: faces+edges+corners
int j = 0;
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int dx = abs(x) + abs(y) + abs(z);
if ((dx > mxDx) || (dx == 0))
continue;
k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny);
k[j + numk] = x; //avoid left-right wrap
k[j + numk + numk] = y; //avoid anterior-posterior wrap: must store y, it is checked against nim->ny below
j++;
} //for x
flt mx = (inimg[0]);
for (size_t i = 0; i < nvox3D; i++)
mx = MAX((inimg[i]), mx);
double dh = mx / 100.0;
flt *outimg = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //output image
int32_t *q = (int32_t *)_mm_malloc(nvox3D * sizeof(int32_t), 64); //queue with untested seed
uint8_t *vxs = (uint8_t *)_mm_malloc(nvox3D * sizeof(uint8_t), 64);
memset(outimg, 0, nvox3D * sizeof(flt)); //zero array
//for (int i = 0; i < nvox3D; i++)
// outimg[i] = 0.0;
int n_steps = (int)ceil(mx / dh);
//for (int step=0; step<n_steps; step++) {
for (int step = n_steps - 1; step >= 0; step--) {
flt thresh = (step + 1) * dh;
memset(vxs, 0, nvox3D * sizeof(uint8_t));
for (int i = 0; i < nvox3D; i++)
if (inimg[i] >= thresh)
vxs[i] = 1; //survives, unclustered
int qlo = 0;
int qhi = 0;
q[qhi] = seed; //add starting voxel as seed in queue
vxs[seed] = 0; //do not find again!
while (qhi >= qlo) { //first in, first out queue
//retire one seed, add 0..6, 0..18 or 0..26 new ones (depending on connectivity)
for (int j = 0; j < numk; j++) {
int jj = q[qlo] + k[j];
if ((jj < 0) || (jj >= nvox3D))
continue; //voxel in volume
if (vxs[jj] == 0)
continue; //already found or did not survive threshold
int dx = x + k[j + numk];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + k[j + numk + numk];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
//add new seed:
vxs[jj] = 0; //do not find again!
qhi++;
q[qhi] = jj;
}
qlo++;
} //while qhi >= qlo: continue until all seeds tested
flt valToAdd = pow(qhi + 1, E) * pow(thresh, H); //"supporting section", Dark Gray in Figure 1
for (int j = 0; j <= qhi; j++)
outimg[q[j]] += valToAdd;
//printf("step %d thresh %g\n", step, outimg[seed]);
if (outimg[seed] >= tfce_thresh)
break;
} //for each step
if (outimg[seed] < tfce_thresh)
printfx("it doesn't reach to specified threshold (%g < %g)\n", outimg[seed], tfce_thresh);
for (size_t i = 0; i < nvox3D; i++)
if (outimg[i] == 0.0)
inimg[i] = 0.0;
_mm_free(q);
_mm_free(vxs);
_mm_free(outimg);
_mm_free(k);
return 0;
}
#endif
staticx int nifti_tfce(nifti_image *nim, double H, double E, int c) {
//https://www.fmrib.ox.ac.uk/datasets/techrep/tr08ss1/tr08ss1.pdf
if (nim->nvox < 1)
return 1;
if (nim->datatype != DT_CALC)
return 1;
int nvox3D = nim->nx * nim->ny * nim->nz;
int nvol = nim->nvox / nvox3D;
int numk = c;
if ((c != 6) && (c != 18) && (c != 26)) {
printfx("suitable values for c are 6, 18 or 26\n");
numk = 6;
}
//set up kernel to search for neighbors. Since we already included sides, we do not worry about A<->P and L<->R wrap
int32_t *k = (int32_t *)_mm_malloc(3 * numk * sizeof(int32_t), 64); //kernel: offset, x, y
int mxDx = 1; //connectivity 6: faces only
if (numk == 18)
mxDx = 2; //connectivity 18: faces+edges
if (numk == 26)
mxDx = 3; //connectivity 26: faces+edges+corners
int j = 0;
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int dx = abs(x) + abs(y) + abs(z);
if ((dx > mxDx) || (dx == 0))
continue;
k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny);
k[j + numk] = x; //avoid left-right wrap
k[j + numk + numk] = y; //avoid anterior-posterior wrap: must store y, it is checked against nim->ny below
j++;
} //for x
//omp notes: here we compute each volume independently.
// Christian Gaser computes the step loop in parallel, which accelerates 3D cases
// This code is very quick on 3D, so this does not seem crucial, and avoids critical sections
#pragma omp parallel for
for (int vol = 0; vol < nvol; vol++) {
//identify clusters
flt *inimg = (flt *)nim->data;
inimg += vol * nvox3D;
flt mx = (inimg[0]);
for (size_t i = 0; i < nvox3D; i++)
mx = MAX((inimg[i]), mx);
double dh = mx / 100.0;
flt *outimg = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64); //output image
int32_t *q = (int32_t *)_mm_malloc(nvox3D * sizeof(int32_t), 64); //queue with untested seed
uint8_t *vxs = (uint8_t *)_mm_malloc(nvox3D * sizeof(uint8_t), 64);
memset(outimg, 0, nvox3D * sizeof(flt)); //zero array
//for (int i = 0; i < nvox3D; i++)
// outimg[i] = 0.0;
int n_steps = (int)ceil(mx / dh);
for (int step = 0; step < n_steps; step++) {
flt thresh = (step + 1) * dh;
memset(vxs, 0, nvox3D * sizeof(uint8_t));
for (int i = 0; i < nvox3D; i++)
if (inimg[i] >= thresh)
vxs[i] = 1; //survives, unclustered
int i = 0;
for (int z = 0; z < nim->nz; z++)
for (int y = 0; y < nim->ny; y++)
for (int x = 0; x < nim->nx; x++) {
if (vxs[i] == 0) {
i++;
continue;
} //voxel did not survive or already clustered
int qlo = 0;
int qhi = 0;
q[qhi] = i; //add starting voxel as seed in queue
vxs[i] = 0; //do not find again!
while (qhi >= qlo) { //first in, first out queue
//retire one seed, add 0..6, 0..18 or 0..26 new ones (depending on connectivity)
for (int j = 0; j < numk; j++) {
int jj = q[qlo] + k[j];
if ((jj < 0) || (jj >= nvox3D))
continue; //voxel in volume
if (vxs[jj] == 0)
continue; //already found or did not survive threshold
int dx = x + k[j + numk];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + k[j + numk + numk];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
//add new seed:
vxs[jj] = 0; //do not find again!
qhi++;
q[qhi] = jj;
}
qlo++;
} //while qhi >= qlo: continue until all seeds tested
flt valToAdd = pow(qhi + 1, E) * pow(thresh, H); //"supporting section", Dark Gray in Figure 1
for (int j = 0; j <= qhi; j++)
outimg[q[j]] += valToAdd;
i++;
} //for each voxel
} //for each step
for (int i = 0; i < nvox3D; i++)
inimg[i] = outimg[i];
_mm_free(q);
_mm_free(vxs);
_mm_free(outimg);
}
_mm_free(k);
return 0;
} //nifti_tfce()
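// For reference, the quantity the step loop above discretizes is the TFCE score
// from the linked Smith & Nichols tech report:
//   TFCE(p) = sum over heights h of e(h)^E * h^H * dh
// where e(h) is the extent of the cluster containing p at threshold h; each
// valToAdd = pow(extent, E) * pow(thresh, H) is one dh-wide slab (dh = mx/100 here).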
staticx int nifti_grid(nifti_image *nim, double v, int spacing) {
if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2))
return 1;
if (nim->datatype != DT_CALC)
return 1;
size_t nxy = (nim->nx * nim->ny);
size_t nzt = nim->nvox / nxy;
flt *f32 = (flt *)nim->data;
flt fv = v;
#pragma omp parallel for
for (size_t i = 0; i < nzt; i++) { //for each 2D slices
size_t so = i * nxy; //slice offset
int z = (i % nim->nz);
if ((nim->nz > 1) && ((z % spacing) == 0)) { //whole slice is grid
for (size_t j = 0; j < nxy; j++)
f32[so++] = fv;
continue;
}
for (size_t y = 0; y < nim->ny; y++)
for (size_t x = 0; x < nim->nx; x++) {
if ((x % spacing) == 0)
f32[so] = fv;
so++;
}
so = i * nxy; //slice offset
for (size_t y = 0; y < nim->ny; y++)
for (size_t x = 0; x < nim->nx; x++) {
if ((y % spacing) == 0)
f32[so] = fv;
so++;
}
} //for i: each 2D slice
return 0;
}
staticx int nifti_rem(nifti_image *nim, double v, int isFrac) {
//remainder (modulo) : fslmaths
/*fmod(0.45, 2) = 0.45 : 0
fmod(0.9, 2) = 0.9 : 0
fmod(1.35, 2) = 1.35 : 1
fmod(1.8, 2) = 1.8 : 1
fmod(-0.45, 2) = -0.45 : 0
fmod(-0.9, 2) = -0.9 : 0
fmod(-1.35, 2) = -1.35 : -1
fmod(-1.8, 2) = -1.8 : -1
*/
if (nim->datatype != DT_CALC)
return 1;
if (nim->nvox < 1)
return 1;
if (v == 0.0) {
printfx("Exception: '-rem 0' does not make sense\n");
return 1;
}
flt fv = v;
flt *f32 = (flt *)nim->data;
if (isFrac) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = fmod(f32[i], fv);
} else {
for (size_t i = 0; i < nim->nvox; i++) {
//printf("fmod(%g, %g) = %g : %g\n", f32[i], fv, fmod(f32[i],fv), trunc(fmod(f32[i],fv)) );
f32[i] = trunc(fmod(f32[i], fv));
}
}
return 0;
}
staticx int nifti_thr(nifti_image *nim, double v, int modifyBrightVoxels, float newIntensity) {
if (nim->nvox < 1)
return 1;
if (nim->datatype == DT_CALC) {
flt fv = v;
flt *f32 = (flt *)nim->data;
if (modifyBrightVoxels) {
for (size_t i = 0; i < nim->nvox; i++)
if (f32[i] > fv)
f32[i] = newIntensity;
} else {
for (size_t i = 0; i < nim->nvox; i++)
if (f32[i] < fv)
f32[i] = newIntensity;
}
return 0;
}
printfx("nifti_thr: Unsupported datatype %d\n", nim->datatype);
return 1;
} // nifti_thr()
staticx int nifti_max(nifti_image *nim, double v, int useMin) {
if (nim->nvox < 1)
return 1;
if (nim->datatype == DT_CALC) {
flt fv = v;
flt *f32 = (flt *)nim->data;
if (useMin) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = fmin(f32[i], fv);
} else {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = fmax(f32[i], fv);
}
return 0;
}
printfx("nifti_max: Unsupported datatype %d\n", nim->datatype);
return 1;
} // nifti_max()
staticx int nifti_inm(nifti_image *nim, double M) {
//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;bf9d21d2.1610
//With '-inm <value>', every voxel in the input volume is multiplied by <value> / M
// where M is the mean across all voxels.
//n.b.: regardless of description, mean appears to only include voxels > 0
if (nim->nvox < 1)
return 1;
if (nim->datatype != DT_CALC)
return 1;
int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
if ((nvox3D < 1) || ((nim->nvox % nvox3D) != 0))
return 1;
int nvol = nim->nvox / nvox3D;
flt *f32 = (flt *)nim->data;
#pragma omp parallel for
for (int v = 0; v < nvol; v++) {
size_t vi = v * nvox3D;
double sum = 0.0;
#define gt0
#ifdef gt0
int n = 0;
for (size_t i = 0; i < nvox3D; i++) {
if (f32[vi + i] > 0.0f) {
n++;
sum += f32[vi + i];
}
}
if (sum == 0.0)
continue;
double ave = sum / n;
#else
for (int i = 0; i < nvox3D; i++)
sum += f32[vi + i];
if (sum == 0.0)
continue;
double ave = sum / nvox3D;
#endif
//printf("%g %g\n", ave, M);
flt scale = M / ave;
for (int i = 0; i < nvox3D; i++)
f32[vi + i] *= scale;
}
return 0;
} // nifti_inm()
staticx int nifti_ing(nifti_image *nim, double M) {
//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=fsl;bf9d21d2.1610
//With '-ing <value>', every voxel is multiplied by <value> / M,
// where M is the mean across the whole 4D dataset (unlike '-inm', which rescales each 3D volume separately)
//n.b.: regardless of description, mean appears to only include voxels > 0
if (nim->nvox < 1)
return 1;
if (nim->datatype != DT_CALC)
return 1;
flt *f32 = (flt *)nim->data;
double sum = 0.0;
int n = 0;
for (size_t i = 0; i < nim->nvox; i++) {
if (f32[i] > 0.0f) {
n++;
sum += f32[i];
}
}
if (sum == 0)
return 0;
double ave = sum / n;
flt scale = M / ave;
#pragma omp parallel for
for (int i = 0; i < nim->nvox; i++)
f32[i] *= scale;
return 0;
} //nifti_ing()
staticx int compare(const void *a, const void *b) {
flt fa = *(const flt *)a;
flt fb = *(const flt *)b;
return (fa > fb) - (fa < fb);
}
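/* A minimal usage sketch (assumption about intent): compare() is a qsort()
   comparator for flt, e.g. for a median over a scratch copy of the image.
   EXAMPLE_USAGE is an assumed macro. */
#ifdef EXAMPLE_USAGE
staticx flt example_median(const flt *img, size_t nvox) {
	flt *tmp = (flt *)_mm_malloc(nvox * sizeof(flt), 64);
	xmemcpy(tmp, img, nvox * sizeof(flt)); //qsort reorders in place, so sort a copy
	qsort(tmp, nvox, sizeof(flt), compare);
	flt md = tmp[nvox / 2];
	_mm_free(tmp);
	return md;
}
#endif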
staticx void dtrend(flt *xx, int npt, int pt0) {
//linear detrend, first point is set to zero
// if pt0=0 then mean is zero, pt0=1 then first point is zero, if pt0=2 final point is zero
double t1, t3, t10, x0, x1;
int ii;
if (npt < 2 || xx == NULL)
return;
x0 = xx[0];
x1 = 0.0;
for (ii = 1; ii < npt; ii++) {
x0 += xx[ii];
x1 += xx[ii] * ii;
}
t1 = npt * x0;
t3 = 1.0 / npt;
t10 = npt * npt;
double f0 = (double)(2.0 / (npt + 1.0) * t3 * (2.0 * t1 - 3.0 * x1 - x0));
double f1 = (double)(-6.0 / (t10 - 1.0) * t3 * (-x0 - 2.0 * x1 + t1));
//printf("%.8g %.8g %g\n", f0, f1, xx[0]);
if (pt0 == 1)
f0 = xx[0];
if (pt0 == 2)
f0 = xx[npt - 1] - (f1 * (npt - 1));
for (ii = 0; ii < npt; ii++)
xx[ii] -= (f0 + f1 * ii);
}
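/* Worked example (our illustration): a perfectly linear series such as
   {1, 3, 5, 7} is fit exactly by f0 + f1*ii, so dtrend() returns all zeros
   for any pt0; only departures from the best-fit line survive detrending. */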
staticx int nifti_detrend_linear(nifti_image *nim) {
if (nim->datatype != DT_CALC)
return 1;
size_t nvox3D = nim->nx * nim->ny * MAX(1, nim->nz);
if (nvox3D < 1)
return 1;
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol < 2) {
printfx("detrend requires a 4D image with at least three volumes\n");
return 1;
}
flt *img = (flt *)nim->data;
#pragma omp parallel for
for (size_t i = 0; i < nvox3D; i++) {
flt *data = (flt *)_mm_malloc(nvol * sizeof(flt), 64);
//load one voxel across all timepoints
int j = 0;
for (size_t v = i; v < nim->nvox; v += nvox3D) {
data[j] = img[v];
j++;
}
//detrend
dtrend(data, nvol, 0);
//save one voxel across all timepoints
j = 0;
for (size_t v = i; v < nim->nvox; v += nvox3D) {
img[v] = data[j];
j++;
}
_mm_free(data);
}
return 0;
}
#ifdef bandpass
//https://github.com/QtSignalProcessing/QtSignalProcessing/blob/master/src/iir.cpp
//https://github.com/rkuchumov/day_plot_diagrams/blob/8df48af431dc76b1656a627f1965d83e8693ddd7/data.c
//https://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
// Sample rate and desired cutoff frequencies (in Hz).
// double highcut = 1250;
// double lowcut = 500;
// double samp_rate = 5000;
//[b,a] = butter(2, [0.009, 0.08]);
//https://afni.nimh.nih.gov/afni/community/board/read.php?1,84373,137180#msg-137180
//Power 2011, Satterthwaite 2013, Carp 2011, Power's reply to Carp 2012
// https://github.com/lindenmp/rs-fMRI/blob/master/func/ButterFilt.m
//https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html
/*
The function butterworth_filter() emulates Jan Simon's FiltFiltM
it uses Gustafsson’s method and padding to reduce ringing at start/end
https://www.mathworks.com/matlabcentral/fileexchange/32261-filterm?focused=5193423&tab=function
Copyright (c) 2011, Jan Simon
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.*/
staticx int butterworth_filter(flt *img, int nvox3D, int nvol, double fs, double highcut, double lowcut) {
//sample rate, low cut and high cut are all in Hz
//this attempts to emulate performance of https://www.mathworks.com/matlabcentral/fileexchange/32261-filterm
// specifically, prior to the forward and reverse pass the coefficients are estimated by a forward and reverse pass
int order = 2;
if (order <= 0)
return 1;
if ((highcut <= 0.0) && (lowcut <= 0.0))
return 1;
if (fs <= 0.0)
return 1;
if ((lowcut > 0.0) && (highcut > 0.0))
printfx("butter bandpass lowcut=%g highcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", lowcut, highcut, fs, order, 2 * order);
else if (highcut > 0.0)
printfx("butter lowpass highcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", highcut, fs, order, 2 * order);
else if (lowcut > 0.0)
printfx("butter highpass lowcut=%g fs=%g order=%d (effectively %d due to filtfilt)\n", lowcut, fs, order, 2 * order);
else {
printfx("Butterworth parameters do not make sense\n");
return 1;
}
double *a;
double *b;
double *IC;
int nX = nvol;
int nA = 0;
nA = butter_design(order, 2.0 * lowcut / fs, 2.0 * highcut / fs, &a, &b, &IC);
int nEdge = 3 * (nA - 1);
if ((nA < 1) || (nX <= nEdge)) {
printfx("filter requires at least %d samples\n", nEdge);
_mm_free(a);
_mm_free(b);
_mm_free(IC);
return 1;
}
#pragma omp parallel for
for (int vx = 0; vx < nvox3D; vx++) {
double *X = (double *)_mm_malloc(nX * sizeof(double), 64);
size_t vo = vx;
flt mn = INFINITY;
flt mx = -INFINITY;
for (int j = 0; j < nX; j++) {
X[j] = img[vo];
mn = MIN(mn, X[j]);
mx = MAX(mx, X[j]);
vo += nvox3D;
}
if (mn < mx) { //some variability
double *Xi = (double *)_mm_malloc(nEdge * sizeof(double), 64);
for (int i = 0; i < nEdge; i++)
Xi[nEdge - i - 1] = X[0] - (X[i + 1] - X[0]);
double *CC = (double *)_mm_malloc((nA - 1) * sizeof(double), 64);
for (int i = 0; i < (nA - 1); i++)
CC[i] = IC[i] * Xi[0];
double *Xf = (double *)_mm_malloc(nEdge * sizeof(double), 64);
for (int i = 0; i < nEdge; i++)
Xf[i] = X[nX - 1] - (X[nX - 2 - i] - X[nX - 1]);
Filt(Xi, nEdge, a, b, nA - 1, CC); //filter head
Filt(X, nX, a, b, nA - 1, CC); //filter array
Filt(Xf, nEdge, a, b, nA - 1, CC); //filter tail
//reverse
for (int i = 0; i < (nA - 1); i++)
CC[i] = IC[i] * Xf[nEdge - 1];
FiltRev(Xf, nEdge, a, b, nA - 1, CC); //filter tail
FiltRev(X, nX, a, b, nA - 1, CC); //filter array
_mm_free(Xi);
_mm_free(Xf);
_mm_free(CC);
} else { //else no variability: set all voxels to zero
for (int j = 0; j < nX; j++)
X[j] = 0;
}
//save data to 4D array
vo = vx;
for (int j = 0; j < nX; j++) {
img[vo] = X[j];
vo += nvox3D;
}
_mm_free(X);
} //for vx
_mm_free(b);
_mm_free(a);
_mm_free(IC);
return 0;
}
staticx int nifti_bandpass(nifti_image *nim, double hp_hz, double lp_hz, double TRsec) {
if (nim->datatype != DT_CALC)
return 1;
size_t nvox3D = nim->nx * nim->ny * MAX(1, nim->nz);
if (TRsec <= 0.0)
TRsec = nim->dt; //pixdim[4]: fall back to header TR when none specified
if (TRsec <= 0) {
printfx("Unable to determine sample rate\n");
return 1;
}
if (nvox3D < 1)
return 1;
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol < 1) {
printfx("bandpass requires 4D datasets\n");
return 1;
}
return butterworth_filter((flt *)nim->data, nvox3D, nvol, 1 / TRsec, hp_hz, lp_hz);
}
#endif
//#define DEBUG_ENABLED
#ifdef DEBUG_ENABLED
staticx int xyzt2txyz(nifti_image *nim) {
size_t nxyz = nim->nx * nim->ny * nim->nz;
size_t nt = nim->nt;
if ((nim->nvox < 1) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1) || (nt < 2))
return 1;
if (nim->datatype != DT_CALC)
return 1;
flt *img = (flt *)nim->data;
	flt *inimg = (flt *)_mm_malloc(nxyz * nt * sizeof(flt), 64); //copy of input: allows in-place transpose with OpenMP
	xmemcpy(inimg, img, nim->nvox * sizeof(flt));
	#pragma omp parallel for
	for (size_t x = 0; x < nxyz; x++) {
		size_t i = x * nt; //per-voxel output offset: a shared counter would race under OpenMP
		for (size_t t = 0; t < nt; t++) {
			img[i] = inimg[x + t * nxyz];
			i++;
		}
	}
_mm_free(inimg);
return 0;
}
staticx int txyz2xyzt(nifti_image *nim) {
size_t nxyz = nim->nx * nim->ny * nim->nz;
size_t nt = nim->nt;
if ((nim->nvox < 1) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1) || (nt < 2))
return 1;
if (nim->datatype != DT_CALC)
return 1;
flt *img = (flt *)nim->data;
	flt *inimg = (flt *)_mm_malloc(nxyz * nt * sizeof(flt), 64); //copy of input: allows in-place transpose with OpenMP
	xmemcpy(inimg, img, nim->nvox * sizeof(flt));
	#pragma omp parallel for
	for (size_t x = 0; x < nxyz; x++) {
		size_t i = x * nt; //per-voxel input offset: a shared counter would race under OpenMP
		for (size_t t = 0; t < nt; t++) {
			img[x + t * nxyz] = inimg[i];
			i++;
		}
	}
_mm_free(inimg);
return 0;
}
staticx int nifti_bptf(nifti_image *nim, double hp_sigma, double lp_sigma, int demean) {
//Spielberg Matlab code: https://cpb-us-w2.wpmucdn.com/sites.udel.edu/dist/7/4542/files/2016/09/fsl_temporal_filt-15sywxn.m
//5.0.7 highpass temporal filter removes the mean component https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/WhatsNew#anchor1
// http://www.fast.u-psud.fr/ezyfit/html/ezfit.html
//gauss fitting functions: y = a*exp(-(x-x0)^2/(2*s^2))
// regression formula (https://www.mathsisfun.com/data/least-squares-regression.html) modulated by weight
if (nim->datatype != DT_CALC)
return 1;
if ((hp_sigma <= 0) && (lp_sigma <= 0))
return 0;
size_t nvox3D = nim->nx * nim->ny * MAX(1, nim->nz);
if (nvox3D < 1)
return 1;
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol < 1) {
printfx("bptf requires 4D datasets\n");
return 1;
}
int *hpStart, *hpEnd;
double *hpSumX, *hpDenom, *hpSumWt, *hp, *hp0;
if (hp_sigma > 0) { //initialize high-pass reusables
//Spielberg's code uses 8*sigma, does not match current fslmaths:
//tested with fslmaths freq4d -bptf 10 -1 nhp
//cutoff ~3: most difference: 4->0.0128902 3->2.98023e-08 2->-0.0455322 1->0.379412
int cutoffhp = ceil(3 * hp_sigma); //to do: check this! ~3
hp = (double *)_mm_malloc((cutoffhp + 1 + cutoffhp) * sizeof(double), 64); //-cutoffhp..+cutoffhp
hp0 = hp + cutoffhp; //convert from 0..(2*cutoffhp) to -cutoffhp..+cutoffhp
for (int k = -cutoffhp; k <= cutoffhp; k++) //for each index in kernel
hp0[k] = exp(-sqr(k) / (2 * sqr(hp_sigma)));
hpStart = (int *)_mm_malloc(nvol * sizeof(int), 64);
hpEnd = (int *)_mm_malloc(nvol * sizeof(int), 64);
hpSumX = (double *)_mm_malloc(nvol * sizeof(double), 64); //
hpDenom = (double *)_mm_malloc(nvol * sizeof(double), 64); // N*Sum(x^2) - (Sum(x))^2
hpSumWt = (double *)_mm_malloc(nvol * sizeof(double), 64); //sum of weight, N
for (int v = 0; v < nvol; v++) {
//linear regression with "gauss" fitting
hpStart[v] = MAX(0, v - cutoffhp);
hpEnd[v] = MIN(nvol - 1, v + cutoffhp);
double sumX = 0.0;
double sumX2 = 0.0;
double sumWt = 0.0;
for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel
int x = k - v;
double wt = hp0[x]; //kernel weight
sumX += wt * x;
sumX2 += wt * x * x;
sumWt += wt;
}
hpSumX[v] = sumX;
hpDenom[v] = (sumWt * sumX2) - sqr(sumX); // N*Sum(x^2) - (Sum(x))^2
if (hpDenom[v] == 0.0)
hpDenom[v] = 1.0; //should never happen, x is known index
hpDenom[v] = 1.0 / hpDenom[v]; //use reciprocal so we can use faster multiplication later
hpSumWt[v] = sumWt;
} //for each volume
} //high-pass reusables
//low-pass AFTER high-pass: fslmaths freq4d -bptf 45 5 fbp
int *lpStart, *lpEnd;
double *lpSumWt, *lp, *lp0;
if (lp_sigma > 0) { //initialize low-pass reusables
//simple Gaussian blur in time domain
//freq4d -bptf -1 5 flp
// fslmaths rest -bptf -1 5 flp
// 3->0.00154053 4->3.5204e-05 5->2.98023e-07, 6->identical
// Spielberg's code uses 8*sigma, so we will use that, even though precision seems excessive
int cutofflp = ceil(8 * lp_sigma); //to do: check this! at least 6
lp = (double *)_mm_malloc((cutofflp + 1 + cutofflp) * sizeof(double), 64); //-cutofflp..+cutofflp
lp0 = lp + cutofflp; //convert from 0..(2*cutofflp) to -cutofflp..+cutofflp
for (int k = -cutofflp; k <= cutofflp; k++) //for each index in kernel
lp0[k] = exp(-sqr(k) / (2 * sqr(lp_sigma)));
lpStart = (int *)_mm_malloc(nvol * sizeof(int), 64);
lpEnd = (int *)_mm_malloc(nvol * sizeof(int), 64);
lpSumWt = (double *)_mm_malloc(nvol * sizeof(double), 64); //sum of weight, N
for (int v = 0; v < nvol; v++) {
lpStart[v] = MAX(0, v - cutofflp);
lpEnd[v] = MIN(nvol - 1, v + cutofflp);
double sumWt = 0.0;
for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel
sumWt += lp0[k - v]; //kernel weight
if (sumWt == 0.0)
sumWt = 1.0; //will never happen
lpSumWt[v] = 1.0 / sumWt; //use reciprocal so we can use faster multiplication later
} //for each volume
} //low-pass reusables
//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;5b8cace9.0902
//if TR=2s and 100 second cutoff is requested choose "-bptf 50 -1"
//The 'cutoff' is defined as the FWHM of the filter, so if you ask for
//100s that means 50 TRs, so the sigma, or HWHM, is 25 TRs.
// -bptf <hp_sigma> <lp_sigma>
xyzt2txyz(nim);
flt *img = (flt *)nim->data;
#pragma omp parallel for
for (size_t i = 0; i < nvox3D; i++) {
//read input data
flt *imgIn = (flt *)_mm_malloc((nvol) * sizeof(flt), 64);
flt *imgOut = img + (i * nvol);
xmemcpy(imgIn, imgOut, nvol * sizeof(flt));
if (hp_sigma > 0) {
double sumOut = 0.0;
for (int v = 0; v < nvol; v++) { //each volume
double sumY = 0.0;
double sumXY = 0.0;
for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel
int x = k - v;
double wt = hp0[x];
flt y = imgIn[k];
sumY += wt * y;
sumXY += wt * x * y;
}
double n = hpSumWt[v];
double m = ((n * sumXY) - (hpSumX[v] * sumY)) * hpDenom[v]; //slope
double b = (sumY - (m * hpSumX[v])) / n; //intercept
imgOut[v] = imgIn[v] - b;
sumOut += imgOut[v];
} //for each volume
//"fslmaths -bptf removes timeseries mean (for FSL 5.0.7 onward)" n.b. except low-pass
double mean = sumOut / (double)nvol; //de-mean AFTER high-pass
if (demean) {
for (int v = 0; v < nvol; v++) //each volume
imgOut[v] -= mean;
}
} //hp_sigma > 0
if (lp_sigma > 0) { //low pass does not de-mean data
//if BOTH low-pass and high-pass, apply low pass AFTER high pass:
// fslmaths freq4d -bptf 45 5 fbp
// difference 1.86265e-08
//still room for improvement:
// fslmaths /Users/chris/src/rest -bptf 45 5 fbp
// r=1.0 identical voxels 73% max difference 0.000488281
if (hp_sigma > 0)
xmemcpy(imgIn, imgOut, nvol * sizeof(flt));
for (int v = 0; v < nvol; v++) { //each volume
double sum = 0.0;
for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel
sum += imgIn[k] * lp0[k - v];
imgOut[v] = sum * lpSumWt[v];
} // for each volume
} //lp_sigma > 0
_mm_free(imgIn);
}
txyz2xyzt(nim);
	if (hp_sigma > 0) { //free high-pass reusables
_mm_free(hp);
_mm_free(hpStart);
_mm_free(hpEnd);
_mm_free(hpSumX);
_mm_free(hpDenom);
_mm_free(hpSumWt);
}
	if (lp_sigma > 0) { //free low-pass reusables
_mm_free(lp);
_mm_free(lpStart);
_mm_free(lpEnd);
_mm_free(lpSumWt);
}
return 0;
} // nifti_bptf()
#else
staticx int nifti_bptf(nifti_image *nim, double hp_sigma, double lp_sigma, int demean) {
//Spielberg Matlab code: https://cpb-us-w2.wpmucdn.com/sites.udel.edu/dist/7/4542/files/2016/09/fsl_temporal_filt-15sywxn.m
//5.0.7 highpass temporal filter removes the mean component https://fsl.fmrib.ox.ac.uk/fsl/fslwiki/WhatsNew#anchor1
// http://www.fast.u-psud.fr/ezyfit/html/ezfit.html
//gauss fitting functions: y = a*exp(-(x-x0)^2/(2*s^2))
// regression formula (https://www.mathsisfun.com/data/least-squares-regression.html) modulated by weight
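// In the high-pass loop below, for each volume v a Gaussian-weighted linear fit is solved
// over the window [hpStart..hpEnd] with x = k - v and weights w = exp(-x^2/(2*sigma^2)):
//   slope     m = (N*Sum(w*x*y) - Sum(w*x)*Sum(w*y)) / (N*Sum(w*x^2) - (Sum(w*x))^2)
//   intercept b = (Sum(w*y) - m*Sum(w*x)) / N, where N = Sum(w)
// and the filtered value is y[v] - b; the x-only terms are precomputed once per volume.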
if (nim->datatype != DT_CALC)
return 1;
if ((hp_sigma <= 0) && (lp_sigma <= 0))
return 0;
size_t nvox3D = nim->nx * nim->ny * MAX(1, nim->nz);
if (nvox3D < 1)
return 1;
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol < 1) {
printfx("bptf requires 4D datasets\n");
return 1;
}
int *hpStart, *hpEnd;
double *hpSumX, *hpDenom, *hpSumWt, *hp, *hp0;
if (hp_sigma > 0) { //initialize high-pass reusables
//Spielberg's code uses 8*sigma, does not match current fslmaths:
//tested with fslmaths freq4d -bptf 10 -1 nhp
//cutoff ~3: most difference: 4->0.0128902 3->2.98023e-08 2->-0.0455322 1->0.379412
int cutoffhp = ceil(3 * hp_sigma); //to do: check this! ~3
hp = (double *)_mm_malloc((cutoffhp + 1 + cutoffhp) * sizeof(double), 64); //-cutoffhp..+cutoffhp
hp0 = hp + cutoffhp; //convert from 0..(2*cutoffhp) to -cutoffhp..+cutoffhp
for (int k = -cutoffhp; k <= cutoffhp; k++) //for each index in kernel
hp0[k] = exp(-sqr(k) / (2 * sqr(hp_sigma)));
hpStart = (int *)_mm_malloc(nvol * sizeof(int), 64);
hpEnd = (int *)_mm_malloc(nvol * sizeof(int), 64);
hpSumX = (double *)_mm_malloc(nvol * sizeof(double), 64);
hpDenom = (double *)_mm_malloc(nvol * sizeof(double), 64); // N*Sum(x^2) - (Sum(x))^2
hpSumWt = (double *)_mm_malloc(nvol * sizeof(double), 64); //sum of weight, N
for (int v = 0; v < nvol; v++) {
//linear regression with "gauss" fitting
hpStart[v] = MAX(0, v - cutoffhp);
hpEnd[v] = MIN(nvol - 1, v + cutoffhp);
double sumX = 0.0;
double sumX2 = 0.0;
double sumWt = 0.0;
for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel
int x = k - v;
double wt = hp0[x]; //kernel weight
sumX += wt * x;
sumX2 += wt * x * x;
sumWt += wt;
}
hpSumX[v] = sumX;
hpDenom[v] = (sumWt * sumX2) - sqr(sumX); // N*Sum(x^2) - (Sum(x))^2
if (hpDenom[v] == 0.0)
hpDenom[v] = 1.0; //should never happen, x is known index
hpDenom[v] = 1.0 / hpDenom[v]; //use reciprocal so we can use faster multiplication later
hpSumWt[v] = sumWt;
} //for each volume
} //high-pass reusables
//low-pass AFTER high-pass: fslmaths freq4d -bptf 45 5 fbp
int *lpStart, *lpEnd;
double *lpSumWt, *lp, *lp0;
if (lp_sigma > 0) { //initialize low-pass reusables
//simple Gaussian blur in time domain
//freq4d -bptf -1 5 flp
// fslmaths rest -bptf -1 5 flp
// 3->0.00154053 4->3.5204e-05 5->2.98023e-07, 6->identical
// Spielberg's code uses 8*sigma, so we will use that, even though precision seems excessive
int cutofflp = ceil(8 * lp_sigma); //to do: check this! at least 6
lp = (double *)_mm_malloc((cutofflp + 1 + cutofflp) * sizeof(double), 64); //-cutofflp..+cutofflp
lp0 = lp + cutofflp; //convert from 0..(2*cutofflp) to -cutofflp..+cutofflp
for (int k = -cutofflp; k <= cutofflp; k++) //for each index in kernel
lp0[k] = exp(-sqr(k) / (2 * sqr(lp_sigma)));
lpStart = (int *)_mm_malloc(nvol * sizeof(int), 64);
lpEnd = (int *)_mm_malloc(nvol * sizeof(int), 64);
lpSumWt = (double *)_mm_malloc(nvol * sizeof(double), 64); //sum of weight, N
for (int v = 0; v < nvol; v++) {
lpStart[v] = MAX(0, v - cutofflp);
lpEnd[v] = MIN(nvol - 1, v + cutofflp);
double sumWt = 0.0;
for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel
sumWt += lp0[k - v]; //kernel weight
if (sumWt == 0.0)
sumWt = 1.0; //will never happen
lpSumWt[v] = 1.0 / sumWt; //use reciprocal so we can use faster multiplication later
} //for each volume
} //low-pass reusables
//https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;5b8cace9.0902
//if TR=2s and 100 second cutoff is requested choose "-bptf 50 -1"
//The 'cutoff' is defined as the FWHM of the filter, so if you ask for
//100s that means 50 TRs, so the sigma, or HWHM, is 25 TRs.
// -bptf <hp_sigma> <lp_sigma>
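// Worked conversion (following the quote above, an assumption rather than a guarantee):
//   cutoff 100s at TR = 2s -> FWHM = 100/2 = 50 volumes -> sigma = HWHM = 50/2 = 25 volumes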
flt *img = (flt *)nim->data;
#pragma omp parallel for
for (size_t i = 0; i < nvox3D; i++) {
//read input data
flt *imgIn = (flt *)_mm_malloc((nvol) * sizeof(flt), 64);
flt *imgOut = (flt *)_mm_malloc((nvol) * sizeof(flt), 64);
int j = 0;
for (size_t v = i; v < nim->nvox; v += nvox3D) {
imgIn[j] = img[v];
j++;
}
if (hp_sigma > 0) {
double sumOut = 0.0;
for (int v = 0; v < nvol; v++) { //each volume
double sumY = 0.0;
double sumXY = 0.0;
for (int k = hpStart[v]; k <= hpEnd[v]; k++) { //for each index in kernel
int x = k - v;
double wt = hp0[x];
flt y = imgIn[k];
sumY += wt * y;
sumXY += wt * x * y;
}
double n = hpSumWt[v];
double m = ((n * sumXY) - (hpSumX[v] * sumY)) * hpDenom[v]; //slope
double b = (sumY - (m * hpSumX[v])) / n; //intercept
imgOut[v] = imgIn[v] - b;
sumOut += imgOut[v];
} //for each volume
//"fslmaths -bptf removes timeseries mean (for FSL 5.0.7 onward)" n.b. except low-pass
double mean = sumOut / (double)nvol; //de-mean AFTER high-pass
if (demean) {
for (int v = 0; v < nvol; v++) //each volume
imgOut[v] -= mean;
}
} //hp_sigma > 0
if (lp_sigma > 0) { //low pass does not de-mean data
//if BOTH low-pass and high-pass, apply low pass AFTER high pass:
// fslmaths freq4d -bptf 45 5 fbp
// difference 1.86265e-08
//still room for improvement:
// fslmaths /Users/chris/src/rest -bptf 45 5 fbp
// r=1.0 identical voxels 73% max difference 0.000488281
if (hp_sigma > 0)
xmemcpy(imgIn, imgOut, nvol * sizeof(flt));
for (int v = 0; v < nvol; v++) { //each volume
double sum = 0.0;
for (int k = lpStart[v]; k <= lpEnd[v]; k++) //for each index in kernel
sum += imgIn[k] * lp0[k - v];
imgOut[v] = sum * lpSumWt[v];
} // for each volume
} //lp_sigma > 0
//write filtered data
j = 0;
for (size_t v = i; v < nim->nvox; v += nvox3D) {
img[v] = imgOut[j];
j++;
}
_mm_free(imgIn);
_mm_free(imgOut);
}
	if (hp_sigma > 0) { //free high-pass reusables
_mm_free(hp);
_mm_free(hpStart);
_mm_free(hpEnd);
_mm_free(hpSumX);
_mm_free(hpDenom);
_mm_free(hpSumWt);
}
	if (lp_sigma > 0) { //free low-pass reusables
_mm_free(lp);
_mm_free(lpStart);
_mm_free(lpEnd);
_mm_free(lpSumWt);
}
return 0;
} // nifti_bptf()
#endif
staticx int nifti_demean(nifti_image *nim) {
if (nim->datatype != DT_CALC)
return 1;
size_t nvox3D = nim->nx * nim->ny * MAX(1, nim->nz);
if (nvox3D < 1)
return 1;
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol < 1) {
printfx("demean requires 4D datasets\n");
return 1;
}
flt *img = (flt *)nim->data;
#pragma omp parallel for
for (size_t i = 0; i < nvox3D; i++) {
double sum = 0.0;
for (size_t v = i; v < nim->nvox; v += nvox3D)
sum += img[v];
double mean = sum / nvol;
for (size_t v = i; v < nim->nvox; v += nvox3D)
img[v] -= mean;
}
return 0;
}
#ifndef USING_WASM
staticx int nifti_dim_reduce(nifti_image *nim, enum eDimReduceOp op, int dim, int percentage) {
//e.g. nifti_dim_reduce(nim, Tmean, 4) reduces 4th dimension, saving mean
//int nReduce = nim->dim[dim];
int nReduce = 0;
if (dim == 1) nReduce = nim->nx;
if (dim == 2) nReduce = nim->ny;
if (dim == 3) nReduce = nim->nz;
if (dim == 4) nReduce = nim->nt;
if ((nReduce <= 1) || (dim < 1) || (dim > 4))
return 0; //nothing to reduce, fslmaths does not generate an error
if ((nim->nvox < 1) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1))
return 1;
//size_t nvox3D = nim->nx * nim->ny * nim->nz;
//int nvol = nim->nvox / nvox3D;
//if ((nvox3D * nvol) != nim->nvox) return 1;
if (nim->datatype != DT_CALC)
return 1;
if (nim->ndim > 4)
printfx("dimension reduction collapsing %" PRId64 "D into to 4D\n", nim->ndim);
int dims[8], indims[8];
for (int i = 0; i < 8; i++)
dims[i] = 0;
dims[1] = nim->nx;
dims[2] = nim->ny;
dims[3] = nim->nz;
//for (int i = 0; i < 4; i++)
// dims[i] = MAX(nim->dim[i], 1);
//XYZT limits to 4 dimensions, so collapse dims [4,5,6,7]
dims[4] = nim->nvox / (dims[1] * dims[2] * dims[3]);
for (int i = 5; i < 8; i++)
dims[i] = 1;
for (int i = 0; i < 8; i++)
indims[i] = dims[i];
if ((dims[1] * dims[2] * dims[3] * dims[4]) != nim->nvox)
return 1; //e.g. data in dim 5..7!
dims[dim] = 1;
if (dim == 4)
dims[0] = 3; //reduce 4D to 3D
size_t nvox = dims[1] * dims[2] * dims[3] * dims[4];
flt *i32 = (flt *)nim->data;
void *dat = (void *)calloc(1, nim->nvox * sizeof(flt));
flt *o32 = (flt *)dat;
int collapseStep; //e.g. if we collapse 4th dimension, we will collapse across voxels separated by X*Y*Z
if (dim == 1)
collapseStep = 1; //collapse by columns
else if (dim == 2)
collapseStep = indims[1]; //collapse by rows
else if (dim == 3)
collapseStep = indims[1] * indims[2]; //collapse by slices
else
collapseStep = indims[1] * indims[2] * indims[3]; //collapse by volumes
int xy = dims[1] * dims[2];
int xyz = xy * dims[3];
if ((op == Tmedian) || (op == Tstd) || (op == Tperc) || (op == Tar1)) {
		//for an even number of items there are two options for the median; consider 4 ranked volumes:
		// a. mean of the 2nd and 3rd values: problem is this can return values not present in the data
		// b. the upper-middle value: always a value present in the data (representative)
		//here we use the latter approach
		//int itm = ((nReduce-1) * 0.5);
		int itm = (nReduce * 0.5); //seems correct, tested with odd and even numbers of volumes
if (op == Tperc) {
double frac = ((double)percentage) / 100.0;
//itm = ((nReduce-1) * frac);
itm = ((nReduce)*frac);
itm = MAX(itm, 0);
itm = MIN(itm, nReduce - 1);
}
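		// Index sketch: with nReduce = 4 sorted samples, itm = 4*0.5 = 2 selects the upper-middle
		// (3rd) value; -Tperc 25 gives itm = 4*0.25 = 1 (clamped to 0..3), the 2nd sorted value.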
#pragma omp parallel for
for (size_t i = 0; i < nvox; i++) {
flt *vxls = (flt *)_mm_malloc((nReduce) * sizeof(flt), 64);
size_t inPos = i;
if (dim < 4) { //i is in output space, convert to input space, allows single loop for OpenMP
int T = (i / xyz); //volume
int r = i % (xyz);
int Z = (r / xy); //slice
r = r % (xy);
int Y = (r / dims[1]); //row
int X = r % dims[1];
inPos = X + (Y * indims[1]) + (Z * indims[1] * indims[2]) + (T * indims[1] * indims[2] * indims[3]);
}
for (int v = 0; v < nReduce; v++) {
vxls[v] = i32[inPos];
inPos += collapseStep;
}
if ((op == Tstd) || (op == Tar1)) {
//computed in cache, far fewer operations than Welford
//note 64-bit double precision even if 32-bit DT_CALC
//neither precision gives identical results
// double precision attenuates catastrophic cancellation
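				// two-pass sketch: std = sqrt(Sum((x-mean)^2)/(n-1));
				// Tar1 is the lag-1 autocorrelation of the demeaned series:
				//   r = Sum_{v=1..n-1}((x[v]-mean)*(x[v-1]-mean)) / Sum((x-mean)^2)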
double sum = 0.0;
for (int v = 0; v < nReduce; v++)
sum += vxls[v];
double mean = sum / nReduce;
double sumSqr = 0.0;
for (int v = 0; v < nReduce; v++)
sumSqr += sqr(vxls[v] - mean);
if (op == Tstd)
o32[i] = sqrt(sumSqr / (nReduce - 1));
else { //Tar1
if (sumSqr == 0.0) {
o32[i] = 0.0;
continue;
}
for (int v = 0; v < nReduce; v++)
vxls[v] = vxls[v] - mean; //demean
double r = 0.0;
for (int v = 1; v < nReduce; v++)
r += (vxls[v] * vxls[v - 1]) / sumSqr;
o32[i] = r;
}
} else { //Tperc or Tmedian
qsort(vxls, nReduce, sizeof(flt), compare);
o32[i] = vxls[itm];
}
_mm_free(vxls);
} //for i: each voxel
} else {
#pragma omp parallel for
for (size_t i = 0; i < nvox; i++) {
size_t inPos = i; //ok if dim==4
if (dim < 4) { //i is in output space, convert to input space, allows single loop for OpenMP
int T = (i / xyz); //volume
int r = i % (xyz);
int Z = (r / xy); //slice
r = r % (xy);
int Y = (r / dims[1]); //row
int X = r % dims[1];
inPos = X + (Y * indims[1]) + (Z * indims[1] * indims[2]) + (T * indims[1] * indims[2] * indims[3]);
}
double sum = 0.0;
flt mx = i32[inPos];
flt mn = mx;
int mxn = 0;
//flt sd = 0.0;
//flt mean = 0.0;
for (int v = 0; v < nReduce; v++) {
flt f = i32[inPos];
sum += f;
if (f > mx) {
mx = f;
mxn = v;
}
mn = MIN(mn, f);
//Welford https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
// 2-pass method faster
//flt delta = f - mean;
//mean = mean + delta / (v+1);
//sd = sd + delta*(f- mean);
inPos += collapseStep;
}
if (op == Tmean)
o32[i] = sum / nReduce; //mean
else if (op == Tmax)
o32[i] = mx; //max
else if (op == Tmaxn)
o32[i] = mxn; //maxn
else if (op == Tmin)
o32[i] = mn; //min
}
	} //if op
nim->nvox = nvox;
//for (int i = 0; i < 4; i++)
// nim->dim[i] = dims[i];
nim->ndim = dims[0];
nim->nx = dims[1];
nim->ny = dims[2];
nim->nz = dims[3];
nim->nt = dims[4];
nim->nu = dims[5];
nim->nv = dims[6];
nim->nw = dims[7];
free(nim->data);
nim->data = dat;
return 0;
} //nifti_dim_reduce()
#endif
staticx int *make_kernel_gauss(nifti_image *nim, int *nkernel, double sigmamm) {
sigmamm = fabs(sigmamm);
if (sigmamm == 0.0)
return NULL;
double mmCutoff = sigmamm * 6.0; //maximum extent
int x = (2 * floor(mmCutoff / nim->dx)) + 1;
int y = (2 * floor(mmCutoff / nim->dy)) + 1;
int z = (2 * floor(mmCutoff / nim->dz)) + 1;
int xlo = (int)(-x / 2);
int ylo = (int)(-y / 2);
int zlo = (int)(-z / 2);
//betterthanfsl
// fsl computes gaussian for all values in cube
// from first principles, a spherical filter has less bias
// since weighting is very low at these edge voxels, it has little impact on
// "-fmean", however with other filters like "dilM", fsl's solution works like
// a "box" filter, not a "sphere" filter
// default is to clone fsl
#ifdef betterthanfsl //true sphere at cutoff
//first pass: determine number of surviving voxels (n)
int n = 0;
for (int zi = zlo; zi < (zlo + z); zi++)
for (int yi = ylo; yi < (ylo + y); yi++)
for (int xi = xlo; xi < (xlo + x); xi++) {
flt dx = (xi * nim->dx);
flt dy = (yi * nim->dy);
flt dz = (zi * nim->dz);
flt dist = sqrt(dx * dx + dy * dy + dz * dz);
if (dist > mmCutoff)
continue;
n++;
}
*nkernel = n;
int kernelWeight = (int)((double)INT_MAX / (double)n); //requires <limits.h>
int *kernel = (int *)_mm_malloc((n * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	double *wt = (double *)_mm_malloc((n) * sizeof(double), 64); //precise (double) weights: temporary scratch
//second pass: fill surviving voxels
int i = 0;
double expd = 2.0 * sigmamm * sigmamm;
for (int zi = zlo; zi < (zlo + z); zi++)
for (int yi = ylo; yi < (ylo + y); yi++)
for (int xi = xlo; xi < (xlo + x); xi++) {
flt dx = (xi * nim->dx);
flt dy = (yi * nim->dy);
flt dz = (zi * nim->dz);
flt dist = sqrt(dx * dx + dy * dy + dz * dz);
if (dist > mmCutoff)
continue;
kernel[i] = xi + (yi * nim->nx) + (zi * nim->nx * nim->ny);
kernel[i + n] = xi; //left-right wrap detection
kernel[i + n + n] = yi; //anterior-posterior wrap detection
//kernel[i+n+n+n] = kernelWeight; //kernel height
wt[i] = exp(-1.0 * (dist * dist) / expd);
i++;
}
#else
int n = x * y * z;
*nkernel = n;
int *kernel = (int *)_mm_malloc((n * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
	double *wt = (double *)_mm_malloc((n) * sizeof(double), 64); //precise (double) weights: temporary scratch
int i = 0;
double expd = 2.0 * sigmamm * sigmamm;
for (int zi = zlo; zi < (zlo + z); zi++)
for (int yi = ylo; yi < (ylo + y); yi++)
for (int xi = xlo; xi < (xlo + x); xi++) {
flt dx = (xi * nim->dx);
flt dy = (yi * nim->dy);
flt dz = (zi * nim->dz);
flt dist = sqrt(dx * dx + dy * dy + dz * dz);
//if (dist > mmCutoff) continue; //<- fsl fills all
kernel[i] = xi + (yi * nim->nx) + (zi * nim->nx * nim->ny);
kernel[i + n] = xi; //left-right wrap detection
kernel[i + n + n] = yi; //anterior-posterior wrap detection
//kernel[i+n+n+n] = kernelWeight; //kernel height
wt[i] = exp(-1.0 * (dist * dist) / expd);
i++;
}
#endif
double sum = 0.0;
for (int i = 0; i < n; i++)
sum += wt[i];
//sum of entire gaussian is 1
double scale = 1.0 / sum;
scale *= (double)INT_MAX; //we use integer scaling: in future faster to typecast integer as flt (if int=32bit) or double (if int=64bit)
for (int i = 0; i < n; i++)
kernel[i + n + n + n] = wt[i] * scale;
_mm_free(wt);
return kernel;
} //make_kernel_gauss()
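/*
 Extent sketch (hypothetical values): for sigmamm = 2 and isotropic 2mm voxels,
 mmCutoff = 12mm, so each dimension spans 2*floor(12/2)+1 = 13 voxels and the default
 (fsl-compatible) kernel holds 13*13*13 = 2197 weighted samples; weights are stored as
 integers scaled by INT_MAX so that the decoded weights sum to 1.
*/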
staticx flt calmax(nifti_image *nim) {
if ((nim->nvox < 1) || (nim->datatype != DT_CALC))
return 0.0;
flt *in32 = (flt *)nim->data;
flt mx = in32[0];
for (size_t i = 0; i < nim->nvox; i++)
mx = MAX(mx, in32[i]);
return mx;
}
staticx flt calmin(nifti_image *nim) {
if ((nim->nvox < 1) || (nim->datatype != DT_CALC))
return 0.0;
flt *in32 = (flt *)nim->data;
flt mn = in32[0];
for (size_t i = 0; i < nim->nvox; i++)
mn = MIN(mn, in32[i]);
return mn;
}
#ifndef USING_WASM
staticx int nifti_tensor_2(nifti_image *nim, int lower2upper) {
int nvox3D = nim->nx * nim->ny * nim->nz;
if ((nim->datatype != DT_CALC) || (nvox3D < 1))
return 1;
int nVol = (int)(nim->nvox / nvox3D);
if (nVol != 6) {
printfx("nifti_tensor_2: input must have precisely 6 volumes (not %d)\n", nVol);
return 1;
}
//3dAFNItoNIFTI does not set intent_code to NIFTI_INTENT_SYMMATRIX, so check dimensions
if ((lower2upper) && (nim->nt == 6))
printfx("nifti_tensor_2: check images (header suggests already in upper triangle format)\n");
if ((!lower2upper) && (nim->nt == 6))
printfx("nifti_tensor_2: check images (header suggests already in lower triangle format)\n");
//lower xx xy yy xz yz zz
//upper xx xy xz yy yz zz
//swap volumes 3 and 4
flt *in32 = (flt *)nim->data;
flt *tmp = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64);
flt *v3 = in32 + (2 * nvox3D);
flt *v4 = in32 + (3 * nvox3D);
xmemcpy(tmp, v4, nvox3D * sizeof(flt));
xmemcpy(v4, v3, nvox3D * sizeof(flt));
xmemcpy(v3, tmp, nvox3D * sizeof(flt));
_mm_free(tmp);
if (lower2upper) {
//FSL uses non-standard upper triangle
//nim->dim[0] = 4;
//for (int i = 4; i < 8; i++)
// nim->dim[i] = 1;
//nim->dim[4] = 6;
nim->ndim = 4;
nim->nt = 6;
nim->nu = 1;
nim->nv = 1;
nim->nw = 1;
} else { //upper2lower
//lower is NIfTI default, used by AFNI, Camino, ANTS
nim->intent_code = NIFTI_INTENT_SYMMATRIX;
/*! To store an NxN symmetric matrix at each voxel:
- dataset must have a 5th dimension
- intent_code must be NIFTI_INTENT_SYMMATRIX
- dim[5] must be N*(N+1)/2
- intent_p1 must be N (in float format)
		- the matrix values A[i][j] are stored in row-order:
- A[0][0]
- A[1][0] A[1][1]
- A[2][0] A[2][1] A[2][2]
- etc.: row-by-row*/
//nim->dim[0] = 5;
//for (int i = 4; i < 8; i++)
// nim->dim[i] = 1;
//nim->dim[5] = 6;
nim->ndim = 5;
nim->nt = 1;
nim->nu = 6;
nim->nv = 1;
nim->nw = 1;
}
return 0;
}
#endif
staticx int nifti_tensor_decomp(nifti_image *nim, int isUpperTriangle) {
// MD= (Dxx+Dyy+Dzz)/3
//https://github.com/ANTsX/ANTs/wiki/Importing-diffusion-tensor-data-from-other-software
// dtifit produces upper-triangular order: xx xy xz yy yz zz
//MD = 1/3*(Dxx+Dyy+Dzz)
	//FA = sqrt(3/2)*sqrt(((Dx-MD)^2+(Dy-MD)^2+(Dz-MD)^2)/(Dx^2+Dy^2+Dz^2))
//fslmaths tensor.nii -tensor_decomp bork.nii
// 3dDTeig -uddata -sep_dsets -prefix AFNIdwi.nii tensor.nii
//3dDTeig expects LOWER diagonal order unless -uddata
// Dxx,Dxy,Dyy,Dxz,Dyz,Dzz
// https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dDTeig.html
//dxx, dxy, dyy, dxz, dyz, dzz
// 3dDTeig -uddata -prefix AFNIdwi.nii tensor.nii
// fslmaths tensor.nii -tensor_decomp bork.nii
// Creates 5*3D and 3*4D files for a total of 14 volumes L1,L2,L3,V1(3),V2(3),V3(3),FA,MD
#ifdef tensor_decomp
if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
return 1;
if (nim->datatype != DT_CALC)
return 1;
int nvox3D = nim->nx * nim->ny * nim->nz;
int nVol = (int)(nim->nvox / nvox3D);
if (nVol != 6) {
printfx("nifti_tensor_decomp: input must have precisely 6 volumes (not %d)\n", nVol);
return 1;
}
flt *in32 = (flt *)nim->data;
//detect if data is upper or lower triangle
// The "YY" component should be brighter (strongly positive) than the off axis XZ
#define detectUpperOrLower
#ifdef detectUpperOrLower
double sumV3 = 0.0; //3rd volume, YY for lower, XZ for upper
double sumV4 = 0.0; //4th volume, XZ for lower, YY for upper
flt *v32 = in32 + (nvox3D * 2); //offset to 3rd volume
for (size_t i = 0; i < nvox3D; i++)
sumV3 += v32[i];
v32 = in32 + (nvox3D * 3); //offset to 4th volume
for (size_t i = 0; i < nvox3D; i++)
sumV4 += v32[i];
if ((sumV4 > sumV3) && (!isUpperTriangle))
printfx("nifti_tensor_decomp: check results, input looks like UPPER triangle.\n");
if ((sumV4 < sumV3) && (isUpperTriangle))
printfx("nifti_tensor_decomp: check results, input looks like LOWER triangle.\n");
#endif
flt *out32 = (flt *)_mm_malloc(14 * nvox3D * sizeof(flt), 64);
for (size_t i = 0; i < nvox3D; i++) {
		//n.b. in6 and out14 are ALWAYS float regardless of DT32, e.g. single even if DT=double
float *in6 = (float *)_mm_malloc(6 * sizeof(float), 64);
float *out14 = (float *)_mm_malloc(14 * sizeof(float), 64);
size_t iv = i;
for (int v = 0; v < 6; v++) {
in6[v] = in32[iv];
iv += nvox3D;
}
EIG_tsfunc(0.0, 0.0, 0, in6, 0.0, 0.0, NULL, 0, out14, isUpperTriangle);
size_t ov = i;
for (int v = 0; v < 14; v++) {
out32[ov] = out14[v];
ov += nvox3D;
}
_mm_free(out14);
_mm_free(in6);
}
free(nim->data);
// Creates 5*3D and 3*4D files for a total of 14 volumes L1(0),L2(1),L3(2),V1(3,4,5),V2(6,7,8),V3(9,10,11),FA(12),MD(13)
flt *outv;
//save 4D images
nim->cal_min = -1;
nim->cal_max = 1;
nim->nvox = nvox3D * 3;
nim->ndim = 4;
nim->nt = 3;
nim->nu = 1;
nim->nv = 1;
nim->nw = 1;
//nim->dim[0] = 4;
//nim->dim[4] = 3;
//for (int i = 5; i < 8; i++)
// nim->dim[i] = 1;
//void * dat = (void *)calloc(1, 3*nvox3D * sizeof(flt)) ;
//nim->data = dat;
//flt * fa32 = (flt *) dat;
//save V1
outv = out32 + (nvox3D * 3);
nim->data = (void *)outv;
nifti_save(nim, "_V1");
//save V2
outv = out32 + (nvox3D * 6);
//xmemcpy(fa32, outv, 3*nvox3D*sizeof(flt));
nim->data = (void *)outv;
nifti_save(nim, "_V2");
//save V3
outv = out32 + (nvox3D * 9);
//xmemcpy(fa32, outv, 3*nvox3D*sizeof(flt));
nim->data = (void *)outv;
nifti_save(nim, "_V3");
//release 4D memory
//free(dat);
//save 3D images
nim->cal_min = 0;
nim->cal_max = 0;
nim->nvox = nvox3D * 1;
nim->ndim = 3;
nim->nt = 1;
//nim->dim[0] = 3;
//nim->dim[4] = 1;
//save L1
outv = out32;
//xmemcpy(fa32, outv, nvox3D*sizeof(flt));
nim->data = (void *)outv;
nim->cal_max = calmax(nim);
nifti_save(nim, "_L1");
//save L2
outv = out32 + (nvox3D * 1);
//xmemcpy(fa32, outv, nvox3D*sizeof(flt));
nim->data = (void *)outv;
nim->cal_max = calmax(nim);
nifti_save(nim, "_L2");
//save L3
outv = out32 + (nvox3D * 2);
//xmemcpy(fa32, outv, nvox3D*sizeof(flt));
nim->data = (void *)outv;
nim->cal_max = calmax(nim);
nifti_save(nim, "_L3");
//save MD
outv = out32 + (nvox3D * 13);
//xmemcpy(fa32, outv, nvox3D*sizeof(flt));
nim->data = (void *)outv;
nim->cal_min = calmin(nim);
nim->cal_max = calmax(nim);
nifti_save(nim, "_MD");
//single volume data
void *dat = (void *)calloc(1, nvox3D * sizeof(flt));
nim->data = dat;
flt *fa32 = (flt *)dat;
//save MO
//MODE https://www.jiscmail.ac.uk/cgi-bin/webadmin?A2=FSL;4fbed3d1.1103
// compute MO (MODE) from L1, L2, L3, MD
//e1=l1-MD, e2=l2-MD, e3=l3-MD;
//n = (e1 + e2 - 2*e3)*(2*e1 - e2 - e3)*(e1 - 2*e2 + e3);
//d = (e1*e1 + e2*e2 + e3*e3 - e1*e2 - e2*e3 - e1*e3);
//d = 2*d*d*d;
//mode = n/d;
//something is wrong with this formula.
	// a. Ennis 2006 includes a sqrt that cannot be factored out
// b. results differ from fslmaths
nim->cal_min = -1;
nim->cal_max = 1;
flt *L1 = out32;
flt *L2 = out32 + (nvox3D * 1);
flt *L3 = out32 + (nvox3D * 2);
flt *MD = out32 + (nvox3D * 13);
for (size_t i = 0; i < nvox3D; i++) {
flt e1 = L1[i] - MD[i];
flt e2 = L2[i] - MD[i];
flt e3 = L3[i] - MD[i];
flt n = (e1 + e2 - 2 * e3) * (2 * e1 - e2 - e3) * (e1 - 2 * e2 + e3);
flt d = (e1 * e1 + e2 * e2 + e3 * e3 - e1 * e2 - e2 * e3 - e1 * e3);
d = sqrt(d); //Correlation r = 0.999746
d = 2 * d * d * d;
//d = sqrt(d); //Correlation r = 0.990319
if (d != 0)
d = n / d; //mode
d = MIN(d, 1.0);
d = MAX(d, -1.0);
fa32[i] = d;
}
nifti_save(nim, "_MO");
//save FA
outv = out32 + (nvox3D * 12);
xmemcpy(fa32, outv, nvox3D * sizeof(flt));
nim->cal_min = 0;
nim->cal_max = 1;
nifti_save(nim, "_FA");
//keep FA in memory
nim->cal_max = 0;
_mm_free(out32);
return 0;
#else
printfx("not compiled to support tensor_decomp\n");
return 1;
#endif
} //nifti_tensor_decomp()
staticx void kernel3D_dilall(nifti_image *nim, int *kernel, int nkernel, int vol) {
int nVox3D = nim->nx * nim->ny * nim->nz;
flt *f32 = (flt *)nim->data;
f32 += (nVox3D * vol);
flt *inf32 = (flt *)_mm_malloc(nVox3D * sizeof(flt), 64);
xmemcpy(inf32, f32, nVox3D * sizeof(flt));
int nxy = nim->nx * nim->ny;
size_t nZero = 1;
while (nZero > 0) {
nZero = 0;
for (int z = 0; z < nim->nz; z++) {
int i = (z * nxy) - 1; //offset
for (int y = 0; y < nim->ny; y++) {
for (int x = 0; x < nim->nx; x++) {
i++;
if (f32[i] != 0.0)
continue;
int nNot0 = 0;
flt sum = 0.0f;
for (size_t k = 0; k < nkernel; k++) {
size_t vx = i + kernel[k];
if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] == 0.0))
continue;
//next handle edge cases
int dx = x + kernel[k + nkernel];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + kernel[k + nkernel + nkernel];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
nNot0++;
sum += inf32[vx];
} //for k
if (nNot0 > 0)
f32[i] = sum / nNot0;
nZero++;
} //for x
} //for y
} //for z
xmemcpy(inf32, f32, nVox3D * sizeof(flt));
//printf("n=0: %zu\n", nZero);
} //nZero > 0
_mm_free(inf32);
} //kernel3D_dilall()
staticx int kernel3D(nifti_image *nim, enum eOp op, int *kernel, int nkernel, int vol) {
int nVox3D = nim->nx * nim->ny * nim->nz;
flt *f32 = (flt *)nim->data;
f32 += (nVox3D * vol);
flt *inf32 = (flt *)_mm_malloc(nVox3D * sizeof(flt), 64);
xmemcpy(inf32, f32, nVox3D * sizeof(flt));
int nxy = nim->nx * nim->ny;
#ifndef USING_WASM //WASM does not support qsort
if (op == fmediank) {
flt *vxls = (flt *)_mm_malloc((nkernel) * sizeof(flt), 64);
for (int z = 0; z < nim->nz; z++) {
int i = (z * nxy) - 1; //offset
for (int y = 0; y < nim->ny; y++) {
for (int x = 0; x < nim->nx; x++) {
i++;
int nOK = 0;
for (size_t k = 0; k < nkernel; k++) {
size_t vx = i + kernel[k];
if ((vx < 0) || (vx >= nVox3D))
continue;
//next handle edge cases
int dx = x + kernel[k + nkernel];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + kernel[k + nkernel + nkernel];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
vxls[nOK] = inf32[vx];
nOK++;
} //for k
qsort(vxls, nOK, sizeof(flt), compare);
int itm = (nOK * 0.5);
f32[i] = vxls[itm];
} //for x
} //for y
} //for z
_mm_free(vxls);
} else
#endif //WASM does not support qsort
if (op == dilMk) {
for (int z = 0; z < nim->nz; z++) {
int i = (z * nxy) - 1; //offset
for (int y = 0; y < nim->ny; y++) {
for (int x = 0; x < nim->nx; x++) {
i++;
if (f32[i] != 0.0)
continue;
int nNot0 = 0;
flt sum = 0.0f;
for (size_t k = 0; k < nkernel; k++) {
size_t vx = i + kernel[k];
if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] == 0.0))
continue;
//next handle edge cases
int dx = x + kernel[k + nkernel];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + kernel[k + nkernel + nkernel];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
nNot0++;
sum += inf32[vx];
} //for k
if (nNot0 > 0)
f32[i] = sum / nNot0;
} //for x
} //for y
} //for z
	} else if (op == dilDk) { //fslmaths 6.0.1 emulation: documented as modal dilation, implemented here as maximum of non-zero neighbors
for (int z = 0; z < nim->nz; z++) {
int i = (z * nxy) - 1; //offset
for (int y = 0; y < nim->ny; y++) {
for (int x = 0; x < nim->nx; x++) {
i++;
if (f32[i] != 0.0)
continue;
//flt mx = -INFINITY;
flt mx = NAN;
for (int k = 0; k < nkernel; k++) {
int vx = i + kernel[k];
if ((vx < 0) || (vx >= nVox3D))
continue;
//next handle edge cases
int dx = x + kernel[k + nkernel];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + kernel[k + nkernel + nkernel];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
flt v = inf32[vx];
if (v == 0.0)
continue;
						mx = MAX(mx, inf32[vx]);
						//with dilD an input voxel of 0 is replaced by the maximum of its non-zero neighbors
} //for k
//https://stackoverflow.com/questions/570669/checking-if-a-double-or-float-is-nan-in-c
// f != f will be true only if f is NaN
if (!(mx != mx))
f32[i] = mx;
} //for x
} //for y
} //for z
} else if (op == dilFk) { //maximum - fslmaths 6.0.1 appears to use "dilF" when the user requests "dilD"
for (int z = 0; z < nim->nz; z++) {
int i = (z * nxy) - 1; //offset
for (int y = 0; y < nim->ny; y++) {
for (int x = 0; x < nim->nx; x++) {
i++;
flt mx = f32[i];
for (int k = 0; k < nkernel; k++) {
int vx = i + kernel[k];
if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] <= mx))
continue;
//next handle edge cases
int dx = x + kernel[k + nkernel];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + kernel[k + nkernel + nkernel];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
mx = MAX(mx, inf32[vx]);
//if (mx < 0) continue; //with dilF, do not make a zero voxel darker than 0
} //for k
f32[i] = mx;
} //for x
} //for y
} //for z
	} else if (op == dilallk) { //-dilall: apply -dilM repeatedly until the entire FOV is covered
kernel3D_dilall(nim, kernel, nkernel, vol);
} else if (op == eroFk) { //Minimum filtering of all voxels
for (int z = 0; z < nim->nz; z++) {
int i = (z * nxy) - 1; //offset
for (int y = 0; y < nim->ny; y++) {
for (int x = 0; x < nim->nx; x++) {
i++;
for (int k = 0; k < nkernel; k++) {
int vx = i + kernel[k];
if ((vx < 0) || (vx >= nVox3D))
continue;
//next handle edge cases
int dx = x + kernel[k + nkernel];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + kernel[k + nkernel + nkernel];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
f32[i] = MIN(f32[i], inf32[vx]);
} //for k
} //for x
} //for y
} //for z
} else if (op == fmeank) { //Mean filtering, kernel weighted (conventionally used with gauss kernel) //u22a
flt *kwt = (flt *)_mm_malloc(nkernel * sizeof(flt), 64);
for (int k = 0; k < nkernel; k++)
kwt[k] = ((double)kernel[k + nkernel + nkernel + nkernel] / (double)INT_MAX);
for (int z = 0; z < nim->nz; z++) {
int i = (z * nxy) - 1; //offset
for (int y = 0; y < nim->ny; y++) {
for (int x = 0; x < nim->nx; x++) {
i++;
flt sum = 0.0f;
flt wt = 0.0f;
for (int k = 0; k < nkernel; k++) {
int vx = i + kernel[k];
if ((vx < 0) || (vx >= nVox3D))
continue;
//next handle edge cases
int dx = x + kernel[k + nkernel];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + kernel[k + nkernel + nkernel];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
sum += (inf32[vx] * kwt[k]);
wt += kwt[k];
} //for k
f32[i] = sum / wt;
} //for x
} //for y
} //for z
_mm_free(kwt);
	} else if (op == fmeanzerok) { //Mean filtering, kernel weighted (negative and positive samples sum to zero: Laplacian kernel) //u22a
flt *kwt = (flt *)_mm_malloc(nkernel * sizeof(flt), 64);
for (int k = 0; k < nkernel; k++)
kwt[k] = ((double)kernel[k + nkernel + nkernel + nkernel] / (double)INT_MAX);
for (int z = 0; z < nim->nz; z++) {
int i = (z * nxy) - 1; //offset
for (int y = 0; y < nim->ny; y++) {
for (int x = 0; x < nim->nx; x++) {
i++;
flt sumPos = 0.0f;
flt wtPos = 0.0f;
flt sumNeg = 0.0f;
flt wtNeg = 0.0f;
for (int k = 0; k < nkernel; k++) {
int vx = i + kernel[k];
if ((vx < 0) || (vx >= nVox3D))
continue;
//next handle edge cases
int dx = x + kernel[k + nkernel];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + kernel[k + nkernel + nkernel];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
if (kwt[k] > 0.0) {
sumPos += (inf32[vx] * kwt[k]);
wtPos += kwt[k];
} else {
sumNeg += (inf32[vx] * kwt[k]);
wtNeg += -kwt[k];
}
} //for k
flt val = 0.0;
if (wtPos > 0.0)
val += sumPos / wtPos;
if (wtNeg > 0.0)
val += sumNeg / wtNeg;
f32[i] = val;
} //for x
} //for y
} //for z
_mm_free(kwt);
} else if (op == fmeanuk) { //Mean filtering, kernel weighted, un-normalized (gives edge effects)
flt *kwt = (flt *)_mm_malloc(nkernel * sizeof(flt), 64);
for (int k = 0; k < nkernel; k++)
kwt[k] = ((double)kernel[k + nkernel + nkernel + nkernel] / (double)INT_MAX);
for (int z = 0; z < nim->nz; z++) {
int i = (z * nxy) - 1; //offset
for (int y = 0; y < nim->ny; y++) {
for (int x = 0; x < nim->nx; x++) {
i++;
flt sum = 0.0f;
//flt wt = 0.0f;
for (int k = 0; k < nkernel; k++) {
int vx = i + kernel[k];
if ((vx < 0) || (vx >= nVox3D))
continue;
//next handle edge cases
int dx = x + kernel[k + nkernel];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + kernel[k + nkernel + nkernel];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
sum += (inf32[vx] * kwt[k]);
//wt += kwt[k];
} //for k
//f32[i] = sum / wt;
f32[i] = sum;
} //for x
} //for y
} //for z
_mm_free(kwt);
} else if (op == erok) {
for (int z = 0; z < nim->nz; z++) {
int i = (z * nxy) - 1; //offset
for (int y = 0; y < nim->ny; y++) {
for (int x = 0; x < nim->nx; x++) {
i++;
if (f32[i] == 0.0)
continue;
for (int k = 0; k < nkernel; k++) {
int vx = i + kernel[k];
if ((vx < 0) || (vx >= nVox3D) || (inf32[vx] != 0.0))
continue;
//next handle edge cases
int dx = x + kernel[k + nkernel];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + kernel[k + nkernel + nkernel];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
f32[i] = 0.0;
} //for k
} //for x
} //for y
} //for z
} else {
printfx("kernel3D: Unsupported operation\n");
_mm_free(inf32);
return 1;
}
_mm_free(inf32);
return 0;
} //kernel3D
staticx int nifti_kernel(nifti_image *nim, enum eOp op, int *kernel, int nkernel) {
if ((nim->nvox < 1) || (nim->nx < 2) || (nim->ny < 2) || (nim->nz < 1))
return 1;
if (nim->datatype != DT_CALC)
return 1;
int nVox3D = nim->nx * nim->ny * nim->nz;
int nVol = (int)(nim->nvox / nVox3D);
if (nVol < 1)
return 1;
if ((nkernel < 1) || (kernel == NULL))
return 1;
for (int v = 0; v < nVol; v++) {
int ok = kernel3D(nim, op, kernel, nkernel, v);
if (ok != 0)
return ok;
}
return 0;
}
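/*
 Usage sketch (assuming a loaded image 'nim' with datatype DT_CALC), combining the two
 helpers above to approximate fslmaths "-kernel gauss 4 -fmean":
	int nkernel = 0;
	int *kernel = make_kernel_gauss(nim, &nkernel, 4.0); //sigma = 4mm
	if (kernel != NULL) {
		nifti_kernel(nim, fmeank, kernel, nkernel); //weighted-mean filter each volume
		_mm_free(kernel);
	}
*/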
staticx int nifti_zero_crossing(nifti_image *nim, int orient) {
//https://homepages.inf.ed.ac.uk/rbf/HIPR2/zeros.htm
	// implements: "A better technique is to consider points on both sides of the threshold boundary, and choose
	// the one with the lowest absolute magnitude of the Laplacian, which will hopefully be closest to the zero crossing."
//we will define edges as voxels with zero crossings
// orient refers to slice direction: 1=x=Sagittal, 2=y=Coronal, 3=z=Axial, else 3D
int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
int nVol = nim->nvox / nvox3D;
	flt *inimg = (flt *)nim->data;
	#pragma omp parallel for
	for (int v = 0; v < nVol; v++) {
		int nx = nim->nx;
		int ny = nim->ny;
		int nz = nim->nz;
		flt *outimg = inimg + ((int64_t)v * nvox3D); //per-volume pointer: a shared moving pointer would race under OpenMP
		flt *img = padImg3D(outimg, &nx, &ny, &nz);
		memset(outimg, 0, nvox3D * sizeof(flt)); //zero this volume only (padded copy already made above)
		int xi = 1;
		int yj = nx;
		int zk = nx * ny;
		//orient: only look for edges in 2D, ignore one dimension
		if (orient == 1) xi = yj;
		if (orient == 2) yj = 1;
		if (orient == 3) zk = 1;
		int nxy = nx * ny;
		int64_t o = 0; //output voxel counter, local to this volume
		for (int z = 1; z < (nz - 1); z++)
		for (int y = 1; y < (ny - 1); y++)
		for (int x = 1; x < (nx - 1); x++) {
			int64_t i = x + (y * nx) + (z * nxy);
			flt val = img[i];
			flt ival = -val;
			//logic: opposite polarities cause negative sign: pos*neg = neg; pos*pos=pos; neg*neg=pos
			//check six neighbors that share a face
			if ((val > 0.0) && ((img[i-xi] <= ival) || (img[i+xi] <= ival)
				|| (img[i-yj] <= ival) || (img[i+yj] <= ival)
				|| (img[i-zk] <= ival) || (img[i+zk] <= ival) ))
				outimg[o] = 1.0;
			if ((val < 0.0) && ((img[i-xi] > ival) || (img[i+xi] > ival)
				|| (img[i-yj] > ival) || (img[i+yj] > ival)
				|| (img[i-zk] > ival) || (img[i+zk] > ival) ))
				outimg[o] = 1.0;
			o++;
		}
_mm_free(img);
}
nim->scl_inter = 0.0;
nim->scl_slope = 1.0;
nim->cal_min = 0.0;
nim->cal_max = 1.0;
return 0;
} //nifti_zero_crossing
#ifdef USING_TIMERS
double clockMsec() { //return elapsed milliseconds (monotonic clock)
struct timespec _t;
clock_gettime(CLOCK_MONOTONIC, &_t);
return _t.tv_sec*1000.0 + (_t.tv_nsec/1.0e6);
}
long timediff(double startTimeMsec, double endTimeMsec) {
return round(endTimeMsec - startTimeMsec);
}
#endif
staticx int nifti_dog(nifti_image *nim, flt SigmammPos, flt SigmammNeg, int orient) {
//Difference of Gaussians (DoG): difference ratio of 1.6 approximates a Laplacian of Gaussian
// https://homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm
flt kKernelWid = 2.5; //ceil(2.5)
int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
if ((nvox3D < 3) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1) || (nim->datatype != DT_CALC)) {
printfx("Image dimensions too small for Difference of Gaussian.\n");
return 1;
}
if (SigmammPos == SigmammNeg) {
printfx("Difference of Gaussian requires two different sigma values.\n");
return 1;
}
if ((SigmammPos < 0) || (SigmammNeg < 0)) {
printfx("Difference of Gaussian requires positive values of sigma.\n");
return 1;
}
#ifdef USING_TIMERS
double startTime = clockMsec();
#endif
flt sigmaMn = MIN(SigmammNeg, SigmammPos);
flt sigmaMx = MAX(SigmammNeg, SigmammPos);
//Optimization: use results from narrow blur (sigmaMn) as inputs for wide blur (sigmaMx)
	//consider desired blurs of 2mm and 3.2mm, we can instead compute 2mm and 2.5mm
//only about 10% faster for difference ratio of 2.0, but also removes one copy
//https://computergraphics.stackexchange.com/questions/256/is-doing-multiple-gaussian-blurs-the-same-as-doing-one-larger-blur
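	// (composition rule: blurring with sigma1 then sigma2 equals one blur with
	//  sqrt(sigma1^2 + sigma2^2), so here sqrt(3.2^2 - 2^2) = sqrt(6.24) ~ 2.5mm)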
sigmaMx = sqrt((sigmaMx*sigmaMx) - (sigmaMn*sigmaMn));
flt *inimg = (flt *)nim->data;
int nVol = nim->nvox / nvox3D;
int64_t nvox4D = nvox3D * nVol;
int ret = nifti_smooth_gauss(nim, sigmaMn, sigmaMn, sigmaMn, kKernelWid);
if (ret != 0) {
printfx("Gaussian smooth failed.\n");
return ret;
}
flt *imgMn = (flt *)_mm_malloc(nvox4D * sizeof(flt), 64);
for (int64_t i = 0; i < nvox4D; i++)
imgMn[i] = inimg[i];
ret = nifti_smooth_gauss(nim, sigmaMx, sigmaMx, sigmaMx, kKernelWid);
if (SigmammPos > SigmammNeg) {
for (int64_t i = 0; i < nvox4D; i++)
inimg[i] = inimg[i] - imgMn[i];
} else {
for (int64_t i = 0; i < nvox4D; i++)
inimg[i] = imgMn[i] - inimg[i];
}
_mm_free(imgMn);
if (orient >= 0)
ret = nifti_zero_crossing(nim, orient);
#ifdef USING_TIMERS
printfx("DoG time: %ld ms\n", timediff(startTime, clockMsec()));
#endif
return ret;
} // nifti_dog()
/*
staticx int nifti_dogNew(nifti_image *nim, flt Sigmamm, flt SigmammNeg, int isEdge) {
//Only one Gaussian blur - faster in theory, but slower in practice (kernel reads out of cache, kernel must be adjusted for edges)
#ifdef USING_TIMERS
double startTime = clockMsec();
#endif
flt kKernelWid = 2.5; //ceil(2.5)
int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
if ((nvox3D < 3) || (nim->nx < 1) || (nim->ny < 1) || (nim->nz < 1) || (nim->datatype != DT_CALC)) {
printfx("Image dimensions too small for Difference of Gaussian.\n");
return 1;
}
int ret = nifti_smooth_gauss(nim, Sigmamm, Sigmamm, Sigmamm, kKernelWid);
if (ret != 0) {
printfx("Gaussian smooth failed.\n");
return ret;
}
int nkernel = 0; //number of voxels in kernel
int *kernel = NULL;
kernel = make_kernel(nim, &nkernel, 3, 3, 3);
//https://en.wikipedia.org/wiki/Discrete_Laplace_operator
//27 point stencil
// [2 3 2; 3 6 3; 2 3 2];
// [3 6 3; 6 -88 6; 3 6 3];
// [2 3 2; 3 6 3; 2 3 2];
int kernelWeight = floor(INT_MAX / 88.0);
int i = nkernel + nkernel + nkernel;
//slice below
kernel[i] = -2 * kernelWeight; i++;
kernel[i] = -3 * kernelWeight; i++;
kernel[i] = -2 * kernelWeight; i++;
kernel[i] = -3 * kernelWeight; i++;
kernel[i] = -6 * kernelWeight; i++;
kernel[i] = -3 * kernelWeight; i++;
kernel[i] = -2 * kernelWeight; i++;
kernel[i] = -3 * kernelWeight; i++;
kernel[i] = -2 * kernelWeight; i++;
//current slice
kernel[i] = -3 * kernelWeight; i++;
kernel[i] = -6 * kernelWeight; i++;
kernel[i] = -3 * kernelWeight; i++;
kernel[i] = -6 * kernelWeight; i++;
kernel[i] = 88 * kernelWeight; i++;
kernel[i] = -6 * kernelWeight; i++;
kernel[i] = -3 * kernelWeight; i++;
kernel[i] = -6 * kernelWeight; i++;
kernel[i] = -3 * kernelWeight; i++;
//slice above
kernel[i] = -2 * kernelWeight; i++;
kernel[i] = -3 * kernelWeight; i++;
kernel[i] = -2 * kernelWeight; i++;
kernel[i] = -3 * kernelWeight; i++;
kernel[i] = -6 * kernelWeight; i++;
kernel[i] = -3 * kernelWeight; i++;
kernel[i] = -2 * kernelWeight; i++;
kernel[i] = -3 * kernelWeight; i++;
kernel[i] = -2 * kernelWeight; i++;
enum eOp op = fmeanzerok; //u22
ret = nifti_kernel(nim, op, kernel, nkernel);
_mm_free(kernel);
if (isEdge)
ret = nifti_zero_crossing(nim, 0);
#ifdef USING_TIMERS
printfx("DoG time: %ld ms\n", timediff(startTime, clockMsec()));
#endif
return ret;
}*/
staticx int nifti_roi(nifti_image *nim, int xmin, int xsize, int ymin, int ysize, int zmin, int zsize, int tmin, int tsize) {
// "fslmaths LAS -roi 3 32 0 40 0 40 0 5 f "
int nt = nim->nvox / (nim->nx * nim->ny * nim->nz);
if ((nim->nvox < 1) || (nt < 1))
return 1;
if (nim->datatype != DT_CALC)
return 1;
flt *f32 = (flt *)nim->data;
//if (neg_determ(nim))
// do something profound; //determinants do not seem to influence "-roi"?
int xmax = xmin + xsize - 1;
int ymax = ymin + ysize - 1;
int zmax = zmin + zsize - 1;
int tmax = tmin + tsize - 1;
//printf("%d..%d", zmin, zmax);
size_t i = 0;
for (int t = 0; t < nt; t++) {
int tOK = 1;
if ((t < tmin) || (t > tmax))
tOK = 0;
for (int z = 0; z < nim->nz; z++) {
int zOK = 1;
if ((z < zmin) || (z > zmax))
zOK = 0;
for (int y = 0; y < nim->ny; y++) {
int yOK = 1;
if ((y < ymin) || (y > ymax))
yOK = 0;
for (int x = 0; x < nim->nx; x++) {
int xOK = 1;
if ((x < xmin) || (x > xmax))
xOK = 0;
if ((xOK == 0) || (yOK == 0) || (zOK == 0) || (tOK == 0))
f32[i] = 0.0;
i++;
} //x
} //y
} //z
} //t
return 0;
}
staticx int nifti_sobel(nifti_image *nim, int offc, int isBinary) {
//sobel is simply one kernel pass per dimension.
// this could be achieved with successive passes of "-kernel"
// here it is done in a single pass for cache efficiency
// https://en.wikipedia.org/wiki/Sobel_operator
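	// For reference, the classic 2D Sobel x-gradient kernel (the y-gradient is its transpose):
	//   Gx = [ -1 0 +1 ]
	//        [ -2 0 +2 ]
	//        [ -1 0 +1 ]
	// only the six nonzero entries are stored below, which is why numk = 6 per direction.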
int vox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
if (nim->datatype != DT_CALC)
return 1;
int nvol = nim->nvox / vox3D;
	int numk = 6; //six weighted samples per gradient direction: the two nonzero columns of the Sobel kernel
int *kx = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
int *ky = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
int *kz = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
int i = 0;
for (int x = 0; x <= 1; x++)
for (int y = -1; y <= 1; y++) {
int sgn = (2 * x) - 1; //-1 or +1
int weight = sgn * (2 - abs(y));
//kx compare left and right
kx[i + numk] = (2 * x) - 1; //left/right wrap
kx[i + numk + numk] = y; //anterior/posterior wrap
kx[i] = kx[i + numk] + (kx[i + numk + numk] * (nim->nx)); //voxel offset
kx[i + numk + numk + numk] = weight; //weight
//ky compare anterior and posterior
ky[i + numk] = y; //left/right wrap
ky[i + numk + numk] = (2 * x) - 1; //anterior/posterior wrap
ky[i] = ky[i + numk] + (ky[i + numk + numk] * (nim->nx)); //voxel offset
ky[i + numk + numk + numk] = weight; //weight
//kz superior/inferior
kz[i + numk] = y; //left/right wrap
kz[i + numk + numk] = 0; //anterior/posterior wrap
kz[i] = y + (((2 * x) - 1) * nim->nx * nim->ny); //voxel offset
kz[i + numk + numk + numk] = weight; //weight
//printf("x%d y%d wt%d\n", kx[i+numk], kx[i+numk+numk], kx[i+numk+numk+numk]);
//printf("x%d y%d wt%d\n", ky[i+numk], ky[i+numk+numk], ky[i+numk+numk+numk]);
i++;
} //for y
flt *i32 = (flt *)nim->data; //input volumes
#pragma omp parallel for
for (int v = 0; v < nvol; v++) {
flt *iv32 = i32 + (v * vox3D);
flt *imgin = _mm_malloc(vox3D * sizeof(flt), 64); //input values prior to blur
//edge information:
flt mx = 0.0;
uint8_t *imgdir = _mm_malloc(vox3D * sizeof(uint8_t), 64); //image direction
if (isBinary)
memset(imgdir, 0, vox3D * sizeof(uint8_t));
xmemcpy(imgin, iv32, vox3D * sizeof(flt));
int i = 0;
for (int z = 0; z < nim->nz; z++)
for (int y = 0; y < nim->ny; y++)
for (size_t x = 0; x < nim->nx; x++) {
				//compute x gradient
flt gx = 0.0f;
for (size_t k = 0; k < numk; k++) {
size_t vx = i + kx[k];
if ((vx < 0) || (vx >= vox3D))
continue;
//next handle edge cases
int dx = x + kx[k + numk];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + kx[k + numk + numk];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
gx += imgin[vx] * kx[k + numk + numk + numk];
} //for k
//compute y gradient
flt gy = 0.0f;
for (size_t k = 0; k < numk; k++) {
size_t vx = i + ky[k];
if ((vx < 0) || (vx >= vox3D))
continue;
//next handle edge cases
int dx = x + ky[k + numk];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + ky[k + numk + numk];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
gy += imgin[vx] * ky[k + numk + numk + numk];
} //for k
//compute z gradient
flt gz = 0.0f; //always 0 for 2D, we could add conditional to skip but optimize for 3D
for (size_t k = 0; k < numk; k++) {
size_t vx = i + kz[k];
if ((vx < 0) || (vx >= vox3D))
continue;
//next handle edge cases
int dx = x + kz[k + numk];
if ((dx < 0) || (dx >= nim->nx))
continue; //wrapped left-right
int dy = y + kz[k + numk + numk];
if ((dy < 0) || (dy >= nim->ny))
continue; //wrapped anterior-posterior
gz += imgin[vx] * kz[k + numk + numk + numk];
} //for k
gx = sqr(gx);
gy = sqr(gy);
gz = sqr(gz);
iv32[i] = sqrt(gx + gy + gz);
if (isBinary) {
mx = MAX(mx, iv32[i]);
if ((gx > gy) && (gx > gz))
imgdir[i] = 1; //left/right gradient is strongest
else if (gy > gz)
imgdir[i] = 2; //anterior/posterior gradient is strongest
else
imgdir[i] = 3; //superior/inferior gradient is strongest (or tie)
}
i++;
} //for x
if (isBinary) {
//magnitude in range 0..1, zero voxels below threshold
float scale = 1.0;
if (mx > 0.0)
scale = 1.0 / mx;
float thresh = 0.1;
for (int vx = 0; vx < vox3D; vx++) {
imgin[vx] = iv32[vx] * scale;
if (imgin[vx] < thresh) {
imgin[vx] = 0.0;
continue;
}
}
//zero output: we will not set border voxels
memset(iv32, 0, vox3D * sizeof(flt));
//
int nx = nim->nx;
int nxy = nx * nim->ny;
for (int z = 1; z < (nim->nz -1); z++)
for (int y = 1; y < (nim->ny - 1); y++)
for (size_t x = 1; x < (nim->nx - 1); x++) {
int vx = x + (y * nx) + (z * nxy);
float val = imgin[vx];
if (val == 0.0) continue;
float mxX = MAX(imgin[vx-1],imgin[vx+1]);
float mxY = MAX(imgin[vx-nx],imgin[vx+nx]);
float mxZ = MAX(imgin[vx-nxy],imgin[vx+nxy]);
if ((imgdir[vx] == 1) && (val > mxX) && ((mxY > 0.0) || (mxZ > 0.0)) ) //left/right gradient
iv32[vx] = 1.0;
else if ((imgdir[vx] == 2) && (val > mxY) && ((mxX > 0.0) || (mxZ > 0.0)) ) //anterior/posterior gradient
iv32[vx] = 1.0;
else if ((val > mxZ) && ((mxX > 0.0) || (mxY > 0.0)))//head/foot gradient
iv32[vx] = 1.0;
}
nim->scl_inter = 0.0;
nim->scl_slope = 1.0;
nim->cal_min = 0.0;
nim->cal_max = 1.0;
} //if isBinary
_mm_free(imgdir);
_mm_free(imgin);
} //for each volume
_mm_free(kx);
_mm_free(ky);
_mm_free(kz);
return 0;
} //nifti_sobel()
#ifndef USING_WASM //WASM does not support changing sform/qform
staticx int nifti_subsamp2(nifti_image *nim, int offc) {
//naive downsampling: this is provided purely to mimic the behavior of fslmaths
// see https://nbviewer.jupyter.org/urls/dl.dropbox.com/s/s0nw827nc4kcnaa/Aliasing.ipynb
// no anti-aliasing filter https://en.wikipedia.org/wiki/Image_scaling
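	// Sketch of the two modes (derived from the code below): -subsamp2offc averages each
	// 2x2x2 block of input voxels (edge blocks average however many voxels map to them),
	// while -subsamp2 applies a 27-point weighted kernel centered on every other input
	// voxel; both halve each spatial dimension (rounding up) and double pixdim.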
int invox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
//int indim[5];
//for (int i = 1; i < 5; i++)
// indim[i] = MAX(nim->dim[i], 1);
int nvol = nim->nvox / invox3D;
int x_odd = nim->nx % 2;
if ((nim->nvox < 1) || (nvol < 1))
return 1;
if (nim->datatype != DT_CALC)
return 1;
int nx = ceil(nim->nx * 0.5);
int ny = ceil(nim->ny * 0.5);
int nz = ceil(nim->nz * 0.5);
if ((nx == nim->nx) && (ny == nim->ny) && (nz == nim->nz))
return 0;
int nvox3D = nx * ny * nz;
flt *i32 = (flt *)nim->data;
void *dat = (void *)calloc(1, nvox3D * nvol * sizeof(flt));
flt *o32 = (flt *)dat;
int x_flip = 0;
if (!neg_determ(nim))
x_flip = 1;
if (offc) {
int *wt = _mm_malloc(nvox3D * nvol * sizeof(int), 64); //weight, just for edges
for (int i = 0; i < (nvox3D * nvol); i++) {
wt[i] = 0;
o32[i] = 0.0;
}
int boost = 0;
if ((x_odd) && (x_flip))
boost = 1;
size_t i = 0;
for (int v = 0; v < nvol; v++) {
size_t vo = v * nvox3D; //volumes do not get reduced
for (int z = 0; z < nim->nz; z++) {
size_t zo = vo + ((z / 2) * ny * nx);
for (int y = 0; y < nim->ny; y++) {
size_t yo = zo + ((y / 2) * nx);
for (int x = 0; x < nim->nx; x++) {
size_t xo = yo + ((x + boost) / 2);
wt[xo]++;
o32[xo] += i32[i];
i++;
} //x
} //y
} //z
} //vol
for (int i = 0; i < (nvox3D * nvol); i++)
if (wt[i] > 0)
o32[i] /= wt[i];
_mm_free(wt);
} else { //if subsamp2offc else subsamp2
int numk = 27; //center voxel and all its neighbors
int *kernel = (int *)_mm_malloc((numk * 4) * sizeof(int), 64); //4 values: offset, xpos, ypos, weight
int i = 0;
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
kernel[i] = x + (y * nim->nx) + (z * nim->nx * nim->ny);
kernel[i + numk] = x; //left-right wrap detection
kernel[i + numk + numk] = y; //anterior-posterior wrap detection
kernel[i + numk + numk + numk] = 8 / (pow(2, sqr(x) + sqr(y) + sqr(z))); //kernel weight
i++;
}
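//integer weights decay with squared distance from the block center:
// 8/2^0 = 8 (center), 8/2^1 = 4 (face), 8/2^2 = 2 (edge), 8/2^3 = 1 (corner)
//equivalent to a separable [1 2 1] binomial smooth applied along each axis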
int boost = 0;
//if ((xflip == 1) && (odd == 0)) boost = 1;
if ((x_flip == 1) && (x_odd == 0))
boost = 1;
//printf("boost %d\n", boost);
size_t nvox3Din = nim->nx * nim->ny * nim->nz;
size_t o = 0;
for (int v = 0; v < nvol; v++) {
size_t vi = v * nvox3Din;
for (int z = 0; z < nz; z++) {
int zi = (2 * z * nim->nx * nim->ny);
//printf("%zu \n", zi);
for (int y = 0; y < ny; y++) {
int yy = y + y; //y*2 input y
int yi = zi + (yy * nim->nx);
for (int x = 0; x < nx; x++) {
//int xx = x+x+xflip; //x*2 input x
int xx = x + x + boost; //x*2 input x
int xi = yi + xx;
//flt sum = 0.0;
//flt wt = 0.0;
double sum = 0.0;
double wt = 0.0;
for (int k = 0; k < numk; k++) {
if ((xi + kernel[k]) < 0)
continue; //position would be less than 0 - outside volume, avoid negative values in size_t
size_t pos = xi + kernel[k]; //offset
if (pos >= nvox3Din)
continue; //position outside volume, e.g. slice above top of volume
int xin = xx + kernel[k + numk];
if ((xin < 0) || (xin >= nim->nx))
continue; //wrap left or right
int yin = yy + kernel[k + numk + numk];
if ((yin < 0) || (yin >= nim->ny))
continue; //wrap anterior or posterior
flt w = kernel[k + numk + numk + numk];
wt += w;
sum += i32[vi + pos] * w;
}
//no need to check wt > 0: every voxel has at least one contributor (itself)
o32[o] = sum / wt;
o++;
} //x
} //y
} //z
} //vol
_mm_free(kernel);
} //if subsamp2offc else subsamp2
nim->nvox = nvox3D * nvol;
nim->nx = nx;
nim->ny = ny;
nim->nz = nz;
//nim->dim[1] = nx;
//nim->dim[2] = ny;
//nim->dim[3] = nz;
nim->dx *= 2;
nim->dy *= 2;
nim->dz *= 2;
//nim->pixdim[1] *= 2;
//nim->pixdim[2] *= 2;
//nim->pixdim[3] *= 2;
//adjust origin
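//keep anatomy in place: find the world-space position of input voxel
//(0,0,0), find the input position that becomes output voxel (0,0,0),
//shift the translation column by the difference, then double the
//rotation/scale columns to reflect the doubled voxel size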
mat44 m = xform(nim);
vec4 vx = setVec4(0, 0, 0);
vec4 pos = nifti_vect44mat44_mul(vx, m);
//vx = setVec4(0.5,0.5,0.5);
//vx = setVec4(1.0,0.0,0.0);
if (offc) {
//printf("%d flip odd %d\n", x_flip, x_odd);
if ((x_odd) && (x_flip))
vx = setVec4(-0.5, -0.5, -0.5); //subsamp2offc
else
vx = setVec4(0.5, 0.5, 0.5); //subsamp2offc
//if (!xflip) {
// vx = setVec4(0.5,0.5,0.5);
// printf("y\n");
//}
} else {
if (x_odd)
vx = setVec4(0, 0, 0); //subsamp2
else
vx = setVec4(1, 0, 0); //subsamp2
if (!x_flip)
vx = setVec4(0, 0, 0);
}
vec4 pos1 = nifti_vect44mat44_mul(vx, m);
vx = setVec4(pos1.v[0] - pos.v[0], pos1.v[1] - pos.v[1], pos1.v[2] - pos.v[2]);
m.m[0][3] += vx.v[0];
m.m[1][3] += vx.v[1];
m.m[2][3] += vx.v[2];
//scale spatial transform
for (int i = 0; i < 3; i++)
for (int j = 0; j < 3; j++)
m.m[i][j] *= 2;
//apply to both sform and qform in case VTK user
for (int i = 0; i < 4; i++)
for (int j = 0; j < 4; j++) {
nim->sto_xyz.m[i][j] = m.m[i][j];
nim->qto_xyz.m[i][j] = m.m[i][j];
}
free(nim->data);
nim->data = dat;
return 0;
}
staticx int nifti_resize(nifti_image *nim, flt zx, flt zy, flt zz, int interp_method) {
//see AFNI's 3dresample
//better than fslmaths: fslmaths cannot resample 4D data
// time 3dresample -dxyz 4.8 4.8 4.8 -rmode Linear -prefix afni.nii -input rest.nii
// time ./sm rest.nii -subsamp2 out.nii
//however, beware aliasing artifacts, e.g. with a zone-plate test image:
// time 3dresample -dxyz 4.8 4.8 4.8 -rmode Linear -prefix afni2.nii -input zoneplate3d_129.nii
int invox3D = nim->nx * nim->ny * nim->nz;
int nvol = nim->nvox / invox3D;
if ((nim->nvox < 1) || (nvol < 1))
return 1;
if (nim->datatype != DT_CALC)
return 1;
int nx = ceil(nim->nx * zx);
int ny = ceil(nim->ny * zy);
int nz = ceil(nim->nz * zz);
if ((nx == nim->nx) && (ny == nim->ny) && (nz == nim->nz))
return 0;
int nvox3D = nx * ny * nz;
flt *i32 = (flt *)nim->data;
void *dat = (void *)calloc(1, nvox3D * nvol * sizeof(flt));
flt *o32 = (flt *)dat;
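//separable resampling: one 1-D pass per axis (X, then Y, then Z); each
//output sample is a weighted sum of input samples using the contribution
//lists precomputed by createFilter(), so each axis costs O(n*taps) rather
//than filtering in full 3-D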
#pragma omp parallel for
for (int v = 0; v < nvol; v++) {
flt *iv32 = i32 + (v * invox3D);
//resize in X: a 1-D pass along each row (width nim->nx -> nx)
flt *imgx = _mm_malloc(nx * nim->ny * nim->nz * sizeof(flt), 64); //input values prior to blur
if (nx == nim->nx) //no change in x dimension
xmemcpy(imgx, iv32, nx * nim->ny * nim->nz * sizeof(flt));
else {
CLIST *contrib = createFilter(nim->nx, nx, interp_method);
size_t i = 0;
for (size_t y = 0; y < (nim->ny * nim->nz); y++) {
for (int x = 0; x < nx; x++) {
flt weight = 0.0;
for (int j = 0; j < contrib[x].n; j++)
weight += iv32[contrib[x].p[j].pixel] * contrib[x].p[j].weight;
imgx[i++] = weight;
}
iv32 += nim->nx;
} //for y
for (i = 0; i < nx; i++)
free(contrib[i].p);
free(contrib);
}
//resize in Y: a 1-D pass along each column (height nim->ny -> ny)
flt *imgy = _mm_malloc(nx * ny * nim->nz * sizeof(flt), 64); //input values prior to blur
if (ny == nim->ny) //no change in y dimension
xmemcpy(imgy, imgx, nx * ny * nim->nz * sizeof(flt));
else {
CLIST *contrib = createFilter(nim->ny, ny, interp_method);
flt *iny = _mm_malloc(nim->ny * sizeof(flt), 64); //input values prior to resize
for (int z = 0; z < nim->nz; z++) {
for (int x = 0; x < nx; x++) {
int yo = (z * nx * ny) + x; //output
int yi = (z * nx * nim->ny) + x; //input
for (int j = 0; j < nim->ny; j++) {
//iny[j] = imgx[yi+(j*nx)];
iny[j] = imgx[yi];
yi += nx;
}
for (int y = 0; y < ny; y++) {
flt weight = 0.0;
for (int j = 0; j < contrib[y].n; j++)
weight += iny[contrib[y].p[j].pixel] * contrib[y].p[j].weight;
//weight = y;
imgy[yo] = weight;
yo += nx;
} //y
} //x
} //z
_mm_free(iny);
for (int i = 0; i < ny; i++)
free(contrib[i].p);
free(contrib);
}
_mm_free(imgx);
//resize in Z: a 1-D pass along each z-column (depth nim->nz -> nz)
flt *ov32 = o32 + (v * nvox3D);
if (nz == nim->nz) //no change in z dimension
xmemcpy(ov32, imgy, nx * ny * nz * sizeof(flt));
else {
CLIST *contrib = createFilter(nim->nz, nz, interp_method);
flt *inz = _mm_malloc(nim->nz * sizeof(flt), 64); //input values prior to resize
int nxy = nx * ny;
for (int y = 0; y < ny; y++) {
for (int x = 0; x < nx; x++) {
int zo = x + (y * nx); //output offset
int zi = x + (y * nx); //input offset
for (int j = 0; j < nim->nz; j++) {
inz[j] = imgy[zi];
zi += nxy;
}
for (int z = 0; z < nz; z++) {
//for (int j = 0; j < nim->nz; j++)
// inz[j] = imgy[zi+(j*nx*ny)];
flt weight = 0.0;
for (int j = 0; j < contrib[z].n; j++)
weight += inz[contrib[z].p[j].pixel] * contrib[z].p[j].weight;
//weight = y;
ov32[zo] = weight;
zo += nx * ny;
} //for z
} //for x
} //for y
_mm_free(inz);
for (int i = 0; i < nz; i++)
free(contrib[i].p);
free(contrib);
}
_mm_free(imgy);
} //for v
nim->nvox = nvox3D * nvol;
nim->nx = nx;
nim->ny = ny;
nim->nz = nz;
//nim->dim[1] = nx;
//nim->dim[2] = ny;
//nim->dim[3] = nz;
nim->dx /= zx;
nim->dy /= zy;
nim->dz /= zz;
//nim->pixdim[1] /= zx;
//nim->pixdim[2] /= zy;
//nim->pixdim[3] /= zz;
//adjust origin - again, just like fslmaths
mat44 m = xform(nim);
m.m[0][0] /= zx;
m.m[1][0] /= zx;
m.m[2][0] /= zx;
m.m[0][1] /= zy;
m.m[1][1] /= zy;
m.m[2][1] /= zy;
m.m[0][2] /= zz;
m.m[1][2] /= zz;
m.m[2][2] /= zz;
for (int i = 0; i < 4; i++) //transform BOTH sform and qform (e.g. ANTs/ITK user)
for (int j = 0; j < 4; j++) {
nim->sto_xyz.m[i][j] = m.m[i][j];
nim->qto_xyz.m[i][j] = m.m[i][j];
}
free(nim->data);
nim->data = dat;
return 0;
}
#endif //WASM does not support changing sform/qform
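//approximate floating-point equality: a and b are 'essentially equal' when
//their difference is within epsilon of the smaller magnitude (a relative
//tolerance, in the spirit of Knuth's TAOCP vol. 2); NaN is treated as equal
//to NaN so two images of NaNs compare as identical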
staticx int essentiallyEqual(float a, float b) {
if (isnan(a) && isnan(b))
return 1; //surprisingly, with C nan != nan
return fabs(a - b) <= ((fabs(a) > fabs(b) ? fabs(b) : fabs(a)) * epsilon);
}
staticx int nifti_binary_power(nifti_image *nim, double v) {
//clone operations from ANTS ImageMath: power
//https://manpages.debian.org/jessie/ants/ImageMath.1.en.html
if (nim->nvox < 1)
return 1;
if (nim->datatype != DT_CALC)
return 1;
//flt fv = v;
flt *f32 = (flt *)nim->data;
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = pow(f32[i], v);
return 0;
}
struct sortIdx {
flt val;
int idx;
};
staticx int nifti_fillh(nifti_image *nim, int is26) {
if (nim->nvox < 1)
return 1;
if (nim->datatype != DT_CALC)
return 1;
int nvox3D = nim->nx * nim->ny * nim->nz;
int nvol = nim->nvox / nvox3D;
//size_t nxy = nim->nx * nim->ny; //slice increment
uint8_t *vx = (uint8_t *)_mm_malloc(nim->nvox * sizeof(uint8_t), 64);
memset(vx, 0, nim->nvox * sizeof(uint8_t));
size_t n1 = 0;
flt *f32 = (flt *)nim->data;
for (size_t i = 0; i < nim->nvox; i++)
if (f32[i] > 0.0) {
n1++;
vx[i] = 1;
}
if ((n1 < 1) || (nim->nx < 3) || (nim->ny < 3) || (nim->nz < 3)) {
//if fewer than 3 rows, columns or slices all voxels touch edge.
//only a binary threshold, not a flood fill
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = vx[i];
_mm_free(vx);
return 1;
}
//set up kernel of neighbor offsets. All border voxels are seeded first, so wrap-around across the A<->P and L<->R faces cannot change the result
int numk = 6;
if (is26)
numk = 26;
int32_t *k = (int32_t *)_mm_malloc(numk * sizeof(int32_t), 64); //neighbor offsets
if (is26) {
int j = 0;
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
k[j] = x + (y * nim->nx) + (z * nim->nx * nim->ny);
j++;
} //for x
} else { //if 26 neighbors else 6..
k[0] = nim->nx * nim->ny; //up
k[1] = -k[0]; //down
k[2] = nim->nx; //anterior
k[3] = -k[2]; //posterior
k[4] = 1; //left
k[5] = -1;
}
//https://en.wikipedia.org/wiki/Flood_fill
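//strategy: seed every zero voxel on the volume border, grow through zero
//voxels with a first-in/first-out queue, then invert - any zero voxel the
//fill never reached is an enclosed cavity, so set it to 1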
#pragma omp parallel for
for (int v = 0; v < nvol; v++) {
uint8_t *vxv = vx;
vxv += (v * nvox3D);
uint8_t *vxs = (uint8_t *)_mm_malloc(nim->nvox * sizeof(uint8_t), 64);
xmemcpy(vxs, vxv, nvox3D * sizeof(uint8_t)); //dst, src
int32_t *q = (int32_t *)_mm_malloc(nvox3D * sizeof(int32_t), 64); //queue with untested seed
int qlo = 0;
int qhi = -1; //signed: queue is empty while qhi < qlo
//load edges
size_t i = 0;
for (int z = 0; z < nim->nz; z++) {
int zedge = 0;
if ((z == 0) || (z == (nim->nz - 1)))
zedge = 1;
for (int y = 0; y < nim->ny; y++) {
int yedge = 0;
if ((y == 0) || (y == (nim->ny - 1)))
yedge = 1;
for (int x = 0; x < nim->nx; x++) {
if ((vxs[i] == 0) && (zedge || yedge || (x == 0) || (x == (nim->nx - 1)))) { //found new seed
vxs[i] = 1; //do not find again
qhi++;
q[qhi] = i;
} // new seed
i++;
} //for x
} //y
} //z
//printf("seeds %d kernel %d\n", qhi+1, numk);
//run a 'first in, first out' queue
while (qhi >= qlo) {
//retire one seed, add 0..6 new ones (fillh) or 0..26 new ones (fillh26)
for (int j = 0; j < numk; j++) {
int jj = q[qlo] + k[j];
if ((jj < 0) || (jj >= nvox3D))
continue;
if (vxs[jj] != 0)
continue;
//add new seed;
vxs[jj] = 1;
qhi++;
q[qhi] = jj;
}
qlo++;
} //while qhi >= qlo: continue until all seeds tested
for (size_t i = 0; i < nvox3D; i++)
if (vxs[i] == 0)
vxv[i] = 1; //hidden internal voxel not found from the fill
_mm_free(vxs);
_mm_free(q);
} //for each volume
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = vx[i];
_mm_free(vx);
_mm_free(k);
return 0;
}
staticx void rand_test() {
//https://www.phoronix.com/scan.php?page=news_item&px=Linux-RdRand-Sanity-Check
int r0 = rand();
for (int i = 0; i < 7; i++)
if (rand() != r0)
return;
printfx("RDRAND gives funky output: update firmware\n");
}
staticx int nifti_unary(nifti_image *nim, enum eOp op) {
if (nim->nvox < 1)
return 1;
if (nim->datatype != DT_CALC) {
printfx("nifti_unary: Unsupported datatype %d\n", nim->datatype);
return 1;
}
flt *f32 = (flt *)nim->data;
if (op == exp1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = exp(f32[i]);
} else if (op == log1) {
for (size_t i = 0; i < nim->nvox; i++) {
if (f32[i] <= 0.0)
f32[i] = 0.0;
else
f32[i] = log(f32[i]);
}
} else if (op == floor1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = floor(f32[i]);
} else if (op == round1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = round(f32[i]);
} else if (op == ceil1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = ceil(f32[i]);
} else if (op == trunc1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = trunc(f32[i]);
} else if (op == sin1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = sin(f32[i]);
} else if (op == cos1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = cos(f32[i]);
} else if (op == tan1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = tan(f32[i]);
} else if (op == asin1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = asin(f32[i]);
} else if (op == acos1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = acos(f32[i]);
} else if (op == atan1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = atan(f32[i]);
} else if (op == sqr1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = f32[i] * f32[i]; //<- pow(a,x) uses flt for x
} else if (op == sqrt1) {
nifti_sqrt(f32, nim->nvox);
} else if (op == recip1) { //https://stackoverflow.com/questions/10606483/sse-reciprocal-if-not-zero
for (size_t i = 0; i < nim->nvox; i++) {
if (f32[i] == 0.0f)
continue;
f32[i] = 1.0 / f32[i];
}
} else if (op == abs1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = fabs(f32[i]);
} else if (op == bin1) {
for (size_t i = 0; i < nim->nvox; i++) {
if (f32[i] > 0)
f32[i] = 1.0f;
else
f32[i] = 0.0f;
}
} else if (op == binv1) {
for (size_t i = 0; i < nim->nvox; i++) {
if (f32[i] > 0)
f32[i] = 0.0f;
else
f32[i] = 1.0f;
}
} else if (op == edge1) {
if ((nim->dx == 0.0) || (nim->dy == 0.0) || (nim->dz == 0.0)) {
printfx("edge requires non-zero pixdim1/pixdim2/pixdim3\n");
return 1;
}
flt xscl = 1.0 / (sqr(nim->dx));
flt yscl = 1.0 / (sqr(nim->dy));
flt zscl = 1.0 / (sqr(nim->dz));
flt xyzscl = 1.0 / (2.0 * sqrt(xscl + yscl + zscl));
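//edge strength: squared central differences along each axis, scaled by
//1/pixdim^2 so gradients are per-mm; xyzscl (with its factor of 2 for the
//two-voxel span of central differences) normalizes the combined magnitude
//for anisotropic voxels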
if (nim->nz < 2) { //no slices 'above' or 'below' for 2D
size_t nxy = nim->nx * nim->ny; //slice increment
int nvol = nim->nvox / nxy;
if ((nvol * nxy) != nim->nvox)
return 1;
#pragma omp parallel for
for (int v = 0; v < nvol; v++) { //compute edge strength independently for each 2D slice
flt *inp = (flt *)_mm_malloc(nxy * sizeof(flt), 64);
flt *o32 = (flt *)f32;
o32 += v * nxy;
xmemcpy(inp, o32, nxy * sizeof(flt)); //dst, src
for (int y = 1; (y < (nim->ny - 1)); y++) {
size_t yo = y * nim->nx;
for (int x = 1; (x < (nim->nx - 1)); x++) {
size_t vx = yo + x;
flt xv = sqr(inp[vx + 1] - inp[vx - 1]) * xscl;
flt yv = sqr(inp[vx + nim->nx] - inp[vx - nim->nx]) * yscl;
o32[vx] = sqrt(xv + yv) * xyzscl;
} //x
} //y
_mm_free(inp);
} //for v
return 1;
} //edge for 2D volume(s)
int nvox3D = nim->nx * nim->ny * nim->nz;
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
size_t nxy = nim->nx * nim->ny; //slice increment
#pragma omp parallel for
for (int v = 0; v < nvol; v++) { //compute edge strength independently for each 3D volume
flt *inp = (flt *)_mm_malloc(nvox3D * sizeof(flt), 64);
flt *o32 = (flt *)f32;
o32 += v * nvox3D;
xmemcpy(inp, o32, nvox3D * sizeof(flt)); //dst, src
for (int z = 1; (z < (nim->nz - 1)); z++) {
size_t zo = z * nxy;
for (int y = 1; (y < (nim->ny - 1)); y++) {
size_t yo = y * nim->nx;
for (int x = 1; (x < (nim->nx - 1)); x++) {
size_t vx = zo + yo + x;
flt xv = sqr(inp[vx + 1] - inp[vx - 1]) * xscl;
flt yv = sqr(inp[vx + nim->nx] - inp[vx - nim->nx]) * yscl;
flt zv = sqr(inp[vx + nxy] - inp[vx - nxy]) * zscl;
o32[vx] = sqrt(xv + yv + zv) * xyzscl;
} //x
} //y
} //z
_mm_free(inp);
} //for v
return 1; //edge for 3D volume(s)
} else if (op == index1) {
//nb FSLmaths flips dim[1] depending on determinant
#ifndef USING_WASM
size_t idx = 0;
if (!neg_determ(nim)) { //flip x
size_t nyzt = nim->nvox / nim->nx;
if ((nyzt * nim->nx) != nim->nvox)
return 1;
for (size_t i = 0; i < nyzt; i++) {
size_t row = i * nim->nx;
int x = nim->nx;
while (x > 0) {
x--;
if (f32[row + x] != 0)
f32[row + x] = idx++;
} //for each column (x)
} //for each row (yzt)
} else //don't flip x
for (size_t i = 0; i < nim->nvox; i++)
if (f32[i] != 0)
f32[i] = idx++;
#endif
} else if (op == nan1) {
for (size_t i = 0; i < nim->nvox; i++)
if (isnan(f32[i]))
f32[i] = 0.0;
} else if (op == nanm1) {
for (size_t i = 0; i < nim->nvox; i++)
if (isnan(f32[i]))
f32[i] = 1.0;
else
f32[i] = 0.0;
} else if (op == rand1) {
rand_test();
flt scl = (1.0 / RAND_MAX);
for (size_t i = 0; i < nim->nvox; i++)
f32[i] += rand() * scl;
} else if (op == randn1) {
rand_test();
//https://en.wikipedia.org/wiki/Box–Muller_transform
//for SIMD see https://github.com/miloyip/normaldist-benchmark
static const flt sigma = 1.0f;
static const flt mu = 0.0;
//static const flt epsilon = FLT_EPSILON;
static const flt two_pi = 2.0 * 3.14159265358979323846;
static const flt scl = (1.0 / RAND_MAX);
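//Box-Muller transform: for independent uniforms u1,u2 in (0,1],
// z0 = sqrt(-2*ln(u1)) * cos(2*pi*u2)
// z1 = sqrt(-2*ln(u1)) * sin(2*pi*u2)
//are independent standard normal deviates, so each pass yields two samples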
//fill pairs
for (size_t i = 0; i < (nim->nvox - 1); i += 2) {
flt u1, u2;
do {
u1 = rand() * scl;
u2 = rand() * scl;
} while (u1 <= epsilon);
flt su1 = sqrt(-2.0 * log(u1));
flt z0 = su1 * cos(two_pi * u2);
flt z1 = su1 * sin(two_pi * u2);
f32[i] += z0 * sigma + mu;
f32[i + 1] += z1 * sigma + mu;
}
//if odd, fill final voxel
if (nim->nvox % 2 != 0) {
flt u1, u2;
do {
u1 = rand() * scl;
u2 = rand() * scl;
} while (u1 <= epsilon);
flt z0 = sqrt(-2.0 * log(u1)) * cos(two_pi * u2);
f32[nim->nvox - 1] += z0 * sigma + mu;
}
} else if (op == range1) {
flt mn = f32[0];
flt mx = mn;
for (size_t i = 0; i < nim->nvox; i++) {
mn = fmin(f32[i], mn);
mx = fmax(f32[i], mx);
}
nim->cal_min = mn;
nim->cal_max = mx;
} else if (op == rank1) {
#ifndef USING_WASM //WASM does not like qsort
int nvox3D = nim->nx * nim->ny * nim->nz;
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol <= 1) {
//you are always first if you are the only one to show up...
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = 1;
} else {
#pragma omp parallel for
for (int i = 0; i < nvox3D; i++) {
//how do we handle ties?
struct sortIdx *k = (struct sortIdx *)_mm_malloc(nvol * sizeof(struct sortIdx), 64);
size_t j = i;
for (int v = 0; v < nvol; v++) {
k[v].val = f32[j];
k[v].idx = j;
j += nvox3D;
}
int varies = 0;
for (int v = 0; v < nvol; v++) {
if (k[v].val != k[0].val) {
varies = 1;
break;
}
}
if (varies) {
qsort(k, nvol, sizeof(struct sortIdx), compare);
for (int v = 0; v < nvol; v++)
f32[k[v].idx] = v + 1;
} else {
j = i;
for (int v = 0; v < nvol; v++) {
f32[j] = v + 1;
j += nvox3D;
}
}
_mm_free(k);
} //for i
} //nvol > 1
#endif //WASM does not like qsort
} else if (op == ranknorm1) { //rank1 was handled above
#ifndef USING_WASM //WASM does not like qsort
int nvox3D = nim->nx * nim->ny * nim->nz;
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol <= 1) {
//you are always first if you are the only one to show up...
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = 0;
} else {
#pragma omp parallel for
for (int i = 0; i < nvox3D; i++) {
struct sortIdx *k = (struct sortIdx *)_mm_malloc(nvol * sizeof(struct sortIdx), 64);
size_t j = i;
double sum = 0.0;
for (int v = 0; v < nvol; v++) {
k[v].val = f32[j];
sum += k[v].val;
k[v].idx = j;
j += nvox3D;
}
double mean = sum / nvol;
double sumSqr = 0.0;
for (int v = 0; v < nvol; v++)
sumSqr += sqr(k[v].val - mean);
double stdev = sqrt(sumSqr / (nvol - 1));
qsort(k, nvol, sizeof(struct sortIdx), compare);
//strange formula, but replicates fslmaths, consider nvol=3 rank[2,0,1] will be pval [2.5/3, 1.5/3, 0.5/3]
for (int v = 0; v < nvol; v++)
f32[k[v].idx] = (stdev * -qginv((double)(v + 0.5) / (double)nvol)) + mean;
_mm_free(k);
} //for i
} //nvol > 1
#endif //WASM does not like qsort
} else if (op == ztop1) {
for (size_t i = 0; i < nim->nvox; i++)
f32[i] = qg(f32[i]);
} else if (op == ptoz1) {
//given p, return x such that Q(x)=p, for 0 < p < 1
// #ifdef DT32
const flt kNaN = NAN;
//const flt kNaN = 0.0 / 0.0;
for (size_t i = 0; i < nim->nvox; i++) {
if ((f32[i] < 0.0) || (f32[i] > 1.0))
f32[i] = kNaN;
else
f32[i] = qginv(f32[i]);
}
} else if ((op == pval1) || (op == pval01)) {
int nvox3D = nim->nx * nim->ny * nim->nz;
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol <= 1) {
printfx("permutation tests require 4D datasets.\n");
return 1;
}
//void *dat = (void *)calloc(1, nvox3D * sizeof(flt));
//flt *o32 = (flt *)dat;
flt *o32= (flt *)_mm_malloc(nvox3D * sizeof(flt), 64);
memset(o32, 0, nvox3D * sizeof(flt)); //zero array
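//uncorrected permutation p-value: for each voxel, count how many of the
//nvol volumes have a statistic at least as large as the observed value in
//volume 0; since the observed volume itself is counted, p is never zero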
#pragma omp parallel for
for (int i = 0; i < nvox3D; i++) {
size_t vi = i;
flt obs = f32[vi]; //observed value - see if it is extreme relative to permutations
int nNotZero = 0;
int nGreater = 0;
int nEqual = 0; //observation in first volume
flt f32v0 = f32[vi];
for (int v = 0; v < nvol; v++) {
if (f32[vi] != 0)
nNotZero++;
if (f32[vi] == f32v0)
nEqual++;
if (f32[vi] >= obs)
nGreater++;
vi += nvox3D;
}
if (op == pval1) {
//if (nEqual == nvol)
// o32[i] = 0.0;
//else
o32[i] = (double)nGreater / (double)nvol;
} else {
if (nEqual == nvol)
o32[i] = 0.0;
else if (obs == 0)
o32[i] = 1.0;
else //nZero must be at least 1: the observed data is not zero
o32[i] = (double)nGreater / (double)(nNotZero);
}
} //for i
nim->nvox = nvox3D;
nim->ndim = 3;
nim->nt = 1;
//nim->dim[0] = 3;
//nim->dim[4] = 1;
//free(nim->data);
_mm_free(nim->data);
nim->data = (void *)o32;
} else if (op == cpval1) {
int nvox3D = nim->nx * nim->ny * nim->nz;
int nvol = nim->nvox / nvox3D;
if ((nvox3D * nvol) != nim->nvox)
return 1;
if (nvol <= 1) {
printfx("permutation tests require 4D datasets.\n");
return 1;
}
//void *dat = (void *)calloc(1, nvox3D * sizeof(flt));
//flt *o32 = (flt *)dat;
flt *o32= (flt *)_mm_malloc(nvox3D * sizeof(flt), 64);
memset(o32, 0, nvox3D * sizeof(flt)); //zero array
flt *vmax = (flt *)_mm_malloc(nvol * sizeof(flt), 64);
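//familywise-error corrected p-value via the max-statistic method: the null
//distribution is the maximum over each permutation volume, and each
//observed voxel is scored against those per-volume maxima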
#pragma omp parallel for
for (int v = 1; v < nvol; v++) { //find maximum for each entire volume (excepted observed volume 0)
size_t vo = v * nvox3D;
flt mx = f32[vo];
for (int i = 0; i < nvox3D; i++)
mx = MAX(mx, f32[vo + i]);
vmax[v] = mx;
//printf("%d %g\n", v, mx);
}
#pragma omp parallel for
for (int i = 0; i < nvox3D; i++) {
flt obs = f32[i]; //observed value - see if it is extreme relative to permutations
int nGreater = 1; //count observation
for (int v = 1; v < nvol; v++)
if (vmax[v] >= obs)
nGreater++;
o32[i] = (double)nGreater / (double)nvol;
} //for i
_mm_free(vmax);
nim->nvox = nvox3D;
nim->ndim = 3;
nim->nt = 1;
//nim->dim[0] = 3;
//nim->dim[4] = 1;
//free(nim->data);
//nim->data = dat;
_mm_free(nim->data);
nim->data = (void *)o32;
} else {
printfx("nifti_unary: Unsupported operation\n");
return 1;
}
return 0;
} //nifti_unary()
staticx int nifti_thrp(nifti_image *nim, double v, enum eOp op) {
// -thrp : use following percentage (0-100) of ROBUST RANGE to threshold current image (zero anything below the number)
// -thrP : use following percentage (0-100) of ROBUST RANGE of non-zero voxels and threshold below
// -uthrp : use following percentage (0-100) of ROBUST RANGE to upper-threshold current image (zero anything above the number)
// -uthrP : use following percentage (0-100) of ROBUST RANGE of non-zero voxels and threshold above
// -clamp/-uclamp : like -thrp/-uthrp, but out-of-range voxels are set to the threshold value rather than zero
if ((v < 0.0) || (v > 100.0)) {
printfx("nifti_thrp: threshold should be between 0..100\n");
return 1;
}
flt pct2, pct98;
int ignoreZeroVoxels = 0;
if ((op == thrP) || (op == uthrP))
ignoreZeroVoxels = 1;
if (nifti_robust_range(nim, &pct2, &pct98, ignoreZeroVoxels) != 0)
return 1;
flt thresh = pct2 + ((v / 100.0) * (pct98 - pct2));
int modifyBrightVoxels = 0;
flt newIntensity = 0.0;
if ((op == clamp) || (op == uclamp))
newIntensity = thresh;
if ((op == uthrp) || (op == uthrP) || (op == uclamp))
modifyBrightVoxels = 1;
nifti_thr(nim, thresh, modifyBrightVoxels, newIntensity);
return 0;
} //nifti_thrp()
#ifndef USING_WASM
staticx int nifti_roc(nifti_image *nim, double fpThresh, const char *foutfile, const char *fnoise, const char *ftruth) {
if (nim->datatype != DT_CALC)
return 1;
//(nim, thresh, argv[outfile], fnoise, argv[truth]);
//fslmaths appears to ignore voxels on edge of image, and will crash with small images:
// error: sort(): given object has non-finite elements
//therefore, there is a margin ("border") around the volume
int border = 5; //in voxels
int mindim = border + border + 1; //e.g. minimum size has one voxel surrounded by border on each side
if ((nim->nx < mindim) || (nim->ny < mindim) || (nim->nz < mindim)) {
printfx("volume too small for ROC analyses\n");
return 1;
}
if (nim->nvox > (nim->nx * nim->ny * nim->nz)) {
printfx("ROC input should be 3D image (not 4D)\n"); //fslmaths seg faults
return 1;
}
if ((fpThresh <= 0.0) || (fpThresh >= 1.0)) {
printfx("ROC false-positive threshold should be between 0 and 1, not '%g'\n", fpThresh);
return 1;
}
nifti_image *nimTrue = nifti_image_read2(ftruth, 1);
if (!nimTrue) {
printfx("** failed to read NIfTI image from '%s'\n", ftruth);
exit(2);
}
if ((nim->nx != nimTrue->nx) || (nim->ny != nimTrue->ny) || (nim->nz != nimTrue->nz)) {
printfx("** Truth image is the wrong size %" PRId64 "x%" PRId64 "x%" PRId64 " vs %" PRId64 "x%" PRId64 "x%" PRId64 "\n", nim->nx, nim->ny, nim->nz, nimTrue->nx, nimTrue->ny, nimTrue->nz);
nifti_image_free(nimTrue);
exit(1);
}
if (nimTrue->nvox > (nimTrue->nx * nimTrue->ny * nimTrue->nz)) {
printfx("ROC truth should be 3D image (not 4D)\n"); //fslmaths seg faults
return 1;
}
nifti_image *nimNoise = NULL;
//count number of tests
//If the truth image contains negative voxels these get excluded from all calculations
int nTest = 0;
int nTrue = 0;
size_t i = 0;
flt *imgTrue = (flt *)nimTrue->data;
flt *imgObs = (flt *)nim->data;
for (int z = 0; z < nim->nz; z++)
for (int y = 0; y < nim->ny; y++)
for (int x = 0; x < nim->nx; x++) {
if ((imgTrue[i] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border))) {
nTest++;
if (imgTrue[i] > 0)
nTrue++;
}
i++;
}
if (nTest < 1) {
printfx("** All truth voxels inside border are negative\n");
exit(1);
}
//printf("%d %d = %d\n", nTrue, nFalse, nTest);
if (nTest == nTrue)
printfx("Warning: All truth voxels inside border are the same (all true or all false)\n");
struct sortIdx *k = (struct sortIdx *)_mm_malloc(nTest * sizeof(struct sortIdx), 64);
//load the data
nTest = 0;
i = 0;
for (int z = 0; z < nim->nz; z++)
for (int y = 0; y < nim->ny; y++)
for (int x = 0; x < nim->nx; x++) {
if ((imgTrue[i] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border))) {
k[nTest].val = imgObs[i];
k[nTest].idx = imgTrue[i] > 0;
nTest++;
}
i++;
}
qsort(k, nTest, sizeof(struct sortIdx), compare);
//for (int v = 0; v < nvol; v++ )
// f32[ k[v].idx ] = v + 1;
//printf("%d tests, intensity range %g..%g\n", nTest, k[0].val, k[nTest-1].val);
FILE *txt = fopen(foutfile, "w+");
if (txt == NULL) {
printfx("** failed to open '%s' for writing\n", foutfile);
_mm_free(k);
nifti_image_free(nimTrue);
return 1;
}
flt threshold = k[nTest - 1].val; //maximum observed intensity
int bins = 1000; //step size: how often are results reported
flt step = (threshold - k[0].val) / bins; //[max-min]/bins
int fp = 0;
int tp = 0;
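//two modes below: with a noise image, the threshold sweeps the sorted
//per-volume maxima of the noise-only volumes (a familywise-error null);
//without one, false/true positives are tallied directly from the truth
//labels as the threshold descends through the sorted test values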
if (fnoise != NULL) {
nimNoise = nifti_image_read2(fnoise, 1);
if (!nimNoise) {
printfx("** failed to read NIfTI image from '%s'\n", fnoise);
nifti_image_free(nimTrue);
exit(2);
}
if ((nim->nx != nimNoise->nx) || (nim->ny != nimNoise->ny) || (nim->nz != nimNoise->nz)) {
printfx("** Noise image is the wrong size %" PRId64 "x%" PRId64 "x%" PRId64 " vs %" PRId64 "x%" PRId64 "x%" PRId64 "\n", nim->nx, nim->ny, nim->nz, nimNoise->nx, nimNoise->ny, nimNoise->nz);
nifti_image_free(nimTrue);
nifti_image_free(nimNoise);
exit(1);
}
//Matlab script roc.m generates samples you can process with fslmaths.
// The fslmaths text file includes two additional columns of output not described by the help documentation
// Appears to find maximum signal in each noise volume, regardless of whether it is a hit or false alarm.
int nvox3D = nim->nx * nim->ny * nim->nz;
int nvol = nimNoise->nvox / nvox3D;
if (nvol < 10)
printfx("Warning: Noise images should include many volumes for estimating familywise error/\n");
flt *imgNoise = (flt *)nimNoise->data;
flt *mxVox = (flt *)_mm_malloc(nvol * sizeof(flt), 64);
for (int v = 0; v < nvol; v++) { //for each volume
mxVox[v] = -INFINITY;
size_t vo = v * nvox3D;
size_t vi = 0;
for (int z = 0; z < nim->nz; z++)
for (int y = 0; y < nim->ny; y++)
for (int x = 0; x < nim->nx; x++) {
if ((imgTrue[vi] >= 0) && (x >= border) && (y >= border) && (z >= border) && (x < (nim->nx - border)) && (y < (nim->ny - border)) && (z < (nim->nz - border)))
mxVox[v] = MAX(mxVox[v], imgNoise[vo + vi]);
vi++;
}
} //for each volume
nifti_image_free(nimNoise);
qsort(mxVox, nvol, sizeof(flt), compare);
int idx = nTest - 1;
flt mxNoise = mxVox[nvol - 1];
while ((idx >= 1) && (k[idx].val > mxNoise)) {
tp++;
idx--;
if ((idx >= 1) && (k[idx].val != k[idx - 1].val) && (k[idx].val <= threshold)) { //idx >= 1 keeps k[idx - 1] in bounds
fprintf(txt, "%g %g %g\n", (double)fp / (double)nvol, (double)tp / (double)nTrue, threshold);
threshold = threshold - step; //delay next report
}
} //more significant than any noise...
int fpThreshInt = round(fpThresh * nvol); //stop when number of false positives exceed this
for (int i = nvol - 1; i >= 1; i--) {
fp++; //false alarm
while ((idx >= 1) && (k[idx].val >= mxVox[i])) {
tp++;
idx--;
if ((idx >= 1) && (k[idx].val != k[idx - 1].val) && (k[idx].val <= threshold)) { //idx >= 1 keeps k[idx - 1] in bounds
fprintf(txt, "%g %g %g\n", (double)fp / (double)nvol, (double)tp / (double)nTrue, threshold);
threshold = threshold - step; //delay next report
}
} //at least as significant as current noise
if ((fp > fpThreshInt) || ((k[i].val != k[i - 1].val) && (k[i].val <= threshold))) {
//printf("%g %g %g\n", (double)fp/(double)nFalse, (double)tp/(double)nTrue, threshold);
fprintf(txt, "%g %g %g\n", (double)fp / (double)nvol, (double)tp / (double)nTrue, threshold);
threshold = threshold - step; //delay next report
}
if (fp > fpThreshInt)
break;
} //inspect all tests...
_mm_free(mxVox);
exit(1);
} else { //if noise image else infer FP/TP from input image
int nFalse = nTest - nTrue;
int fpThreshInt = ceil(fpThresh * nFalse); //stop when number of false positives exceed this
for (int i = nTest - 1; i >= 1; i--) {
if (k[i].idx == 0)
fp++; //false alarm
else
tp++; //hit
if ((fp > fpThreshInt) || ((k[i].val != k[i - 1].val) && (k[i].val <= threshold))) {
//printf("%g %g %g\n", (double)fp/(double)nFalse, (double)tp/(double)nTrue, threshold);
fprintf(txt, "%g %g %g\n", (double)fp / (double)nFalse, (double)tp / (double)nTrue, threshold);
threshold = threshold - step; //delay next report
}
if (fp > fpThreshInt)
break;
} //inspect all tests...
} //if noise else...
fclose(txt);
_mm_free(k);
nifti_image_free(nimTrue);
return 0;
}
staticx int nifti_binary(nifti_image *nim, char *fin, enum eOp op) {
if (nim->nvox < 1)
return 1;
if (nim->datatype != DT_CALC) {
printfx("nifti_binary: Unsupported datatype %d\n", nim->datatype);
return 1;
}
nifti_image *nim2 = nifti_image_read2(fin, 1);
if (!nim2) {
printfx("** failed to read NIfTI image from '%s'\n", fin);
return 2;
}
if ((nim->nx != nim2->nx) || (nim->ny != nim2->ny) || (nim->nz != nim2->nz)) {
printfx("** Attempted to process images of different sizes %" PRId64 "x%" PRId64 "x%" PRId64 " vs %" PRId64 "x%" PRId64 "x%" PRId64 "\n", nim->nx, nim->ny, nim->nz, nim2->nx, nim2->ny, nim2->nz);
nifti_image_free(nim2);
return 1;
}
if (max_displacement_mm(nim, nim2) > 0.5) { //fslmaths appears to use mm not voxel difference to determine alignment, threshold ~0.5mm
printfx("WARNING:: Inconsistent orientations for individual images in pipeline! (%gmm)\n", max_displacement_mm(nim, nim2));
printfx(" Will use voxel-based orientation which is probably incorrect - *PLEASE CHECK*!\n");
}
in_hdr ihdr = set_input_hdr(nim2);
if (nifti_image_change_datatype(nim2, nim->datatype, &ihdr) != 0) {
nifti_image_free(nim2);
return 1;
}
flt *imga = (flt *)nim->data;
flt *imgb = (flt *)nim2->data;
int nvox3D = nim->nx * nim->ny * nim->nz;
int nvola = nim->nvox / nvox3D;
int nvolb = nim2->nvox / nvox3D;
int rem0 = 0;
int swap4D = 0; //if 1: input nim was 3D, but nim2 is 4D: output will be 4D
if ((nvolb > 1) && (nim->nvox != nim2->nvox) && ((op == uthr) || (op == thr))) {
//"niimath 3D -uthr 4D out" only uses 1st volume of 4D, only one volume out
nvolb = 1; //fslmaths
printfx("threshold operation expects 3D mask\n"); //fslmaths makes not modification to image
if (op == uthr) //strictly for fslmaths compatibility - makes no sense
for (size_t i = 0; i < nim->nvox; i++)
imga[i] = 0;
nifti_image_free(nim2);
return 0;
} else if (nim->nvox != nim2->nvox) {
//situation where one input is 3D and the other is 4D
if ((nvola != 1) && ((nvolb != 1))) {
printfx("nifti_binary: both images must have the same number of volumes, or one must have a single volume (%d and %d)\n", nvola, nvolb);
nifti_image_free(nim2);
return 1;
}
if (nvola == 1) {
imgb = (flt *)nim->data;
imga = (flt *)nim2->data;
swap4D = 1;
nvolb = nim->nvox / nvox3D;
nvola = nim2->nvox / nvox3D;
}
} //make it so imga/nvola >= imgb/nvolb
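//mixed 3D/4D inputs broadcast: below, vb advances with (v % nvolb) so a
//single 3D volume is reused against every volume of the 4D image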
for (int v = 0; v < nvola; v++) { //for each volume of image A
int va = v * nvox3D; //start of volume for image A
int vb = (v % nvolb) * nvox3D; //start of volume for image B
if (op == add) {
for (int i = 0; i < nvox3D; i++)
imga[va + i] += imgb[vb + i];
} else if (op == sub) {
if (swap4D) {
for (int i = 0; i < nvox3D; i++) {
imga[va + i] = imgb[vb + i] - imga[va + i];
//printf(">>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], x, imga[va+i]);
}
} else {
for (int i = 0; i < nvox3D; i++) {
//printf("[%d]/[%d] %g/%g\n", va+i, vb+i, imga[va+i], imga[vb+i]);
imga[va + i] = imga[va + i] - imgb[vb + i];
}
}
} else if (op == mul) {
for (int i = 0; i < nvox3D; i++)
imga[va + i] *= imgb[vb + i];
} else if (op == max) {
for (int i = 0; i < nvox3D; i++)
imga[va + i] = MAX(imga[va + i], imgb[vb + i]);
} else if (op == min) {
for (int i = 0; i < nvox3D; i++)
imga[va + i] = MIN(imga[va + i], imgb[vb + i]);
} else if (op == thr) {
//thr : use following number to threshold current image (zero anything below the number)
for (int i = 0; i < nvox3D; i++)
if (imga[va + i] < imgb[vb + i])
imga[va + i] = 0;
} else if (op == uthr) {
//uthr : use following number to upper-threshold current image (zero anything above the number)
for (int i = 0; i < nvox3D; i++)
if (imga[va + i] > imgb[vb + i])
imga[va + i] = 0;
} else if (op == mas) {
if (swap4D) {
for (int i = 0; i < nvox3D; i++) {
if (imga[va + i] > 0)
imga[va + i] = imgb[vb + i];
else
imga[va + i] = 0;
}
} else {
for (int i = 0; i < nvox3D; i++)
if (imgb[vb + i] <= 0)
imga[va + i] = 0;
}
} else if (op == divX) {
if (swap4D) {
for (int i = 0; i < nvox3D; i++) {
//flt x = imga[va+i];
if (imga[va + i] != 0.0f)
imga[va + i] = imgb[vb + i] / imga[va + i];
//printf(">>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], x, imga[va+i]);
}
} else {
for (int i = 0; i < nvox3D; i++) {
//printf("[%d]/[%d] %g/%g\n", va+i, vb+i, imga[va+i], imga[vb+i]);
if (imgb[vb + i] == 0.0f)
imga[va + i] = 0.0f;
else
imga[va + i] = imga[va + i] / imgb[vb + i];
}
}
} else if (op == mod) { //afni mod function, divide by zero yields 0 (unlike Matlab, see remtest.m)
//fractional remainder:
if (swap4D) {
for (int i = 0; i < nvox3D; i++) {
//printf("!>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], imga[va+i], fmod(trunc(imgb[vb+i]), trunc(imga[va+i])) );
if (imga[va + i] != 0.0f)
imga[va + i] = fmod(imgb[vb + i], imga[va + i]);
else {
rem0 = 1;
imga[va + i] = 0; //imgb[vb+i];
}
}
} else {
for (int i = 0; i < nvox3D; i++) {
//printf("?>[%d]/[%d] %g/%g = %g : %g\n", va+i, vb+i, imga[va+i], imgb[vb+i], fmod(imga[va+i], imgb[vb+i]), fmod(trunc(imga[va+i]), trunc(imgb[vb+i])) );
if (imgb[vb + i] != 0.0f)
//imga[va+i] = round(fmod(imga[va+i], imgb[vb+i]));
imga[va + i] = fmod(imga[va + i], imgb[vb + i]);
else {
rem0 = 1;
imga[va + i] = 0;
}
}
}
} else if (op == rem) { //fmod _rem
//fractional remainder:
if (swap4D) {
for (int i = 0; i < nvox3D; i++) {
//printf("!>[%d]/[%d] %g/%g = %g\n",vb+i, va+i, imgb[vb+i], imga[va+i], fmod(trunc(imgb[vb+i]), trunc(imga[va+i])) );
if (trunc(imga[va + i]) != 0.0f)
imga[va + i] = fmod(trunc(imgb[vb + i]), trunc(imga[va + i]));
else {
rem0 = 1;
imga[va + i] = imgb[vb + i];
}
}
} else {
for (int i = 0; i < nvox3D; i++) {
//printf("?>[%d]/[%d] %g/%g = %g : %g\n", va+i, vb+i, imga[va+i], imgb[vb+i], fmod(imga[va+i], imgb[vb+i]), fmod(trunc(imga[va+i]), trunc(imgb[vb+i])) );
if (trunc(imgb[vb + i]) != 0.0f)
//imga[va+i] = round(fmod(imga[va+i], imgb[vb+i]));
imga[va + i] = fmod(trunc(imga[va + i]), trunc(imgb[vb + i]));
else
rem0 = 1;
}
}
} else {
printfx("nifti_binary: unsupported operation %d\n", op);
nifti_image_free(nim2);
return 1;
}
}
if (swap4D) { //if 1: input nim was 3D, but nim2 is 4D: output will be 4D
nim->nvox = nim2->nvox;
nim->ndim = nim2->ndim;
nim->nt = nim2->nt;
nim->nu = nim2->nu;
nim->nv = nim2->nv;
nim->nw = nim2->nw;
//for (int i = 4; i < 8; i++) {
//nim->dim[i] = nim2->dim[i];
//nim->pixdim[i] = nim2->pixdim[i];
//}
nim->dt = nim2->dt;
nim->du = nim2->du;
nim->dv = nim2->dv;
nim->dw = nim2->dw;
free(nim->data);
nim->data = nim2->data;
nim2->data = NULL;
}
nifti_image_free(nim2);
if (rem0) {
printfx("Warning -rem image included zeros (fslmaths exception)\n");
return 0;
}
return 0;
} // nifti_binary()
staticx void nifti_compare(nifti_image *nim, char *fin) {
if (nim->nvox < 1)
exit(1);
if (nim->datatype != DT_CALC) {
printfx("nifti_compare: Unsupported datatype %d\n", nim->datatype);
exit(1);
}
nifti_image *nim2 = nifti_image_read2(fin, 1);
if (!nim2) {
printfx("** failed to read NIfTI image from '%s'\n", fin);
exit(2);
}
if ((nim->nx != nim2->nx) || (nim->ny != nim2->ny) || (nim->nz != nim2->nz)) {
printfx("** Attempted to process images of different sizes %" PRId64 "x%" PRId64 "x%" PRId64 "vs %" PRId64 "x%" PRId64 "x%" PRId64 "\n", nim->nx, nim->ny, nim->nz, nim2->nx, nim2->ny, nim2->nz);
nifti_image_free(nim2);
exit(1);
}
if (nim->nvox != nim2->nvox) {
printfx(" Number of volumes differ\n");
nifti_image_free(nim2);
exit(1);
}
if (max_displacement_mm(nim, nim2) > 0.5) { //fslmaths appears to use mm not voxel difference to determine alignment, threshold ~0.5mm
printfx("WARNING:: Inconsistent orientations for individual images in pipeline! (%gmm)\n", max_displacement_mm(nim, nim2));
printfx(" Will use voxel-based orientation which is probably incorrect - *PLEASE CHECK*!\n");
}
in_hdr ihdr = set_input_hdr(nim2);
if (nifti_image_change_datatype(nim2, nim->datatype, &ihdr) != 0) {
nifti_image_free(nim2);
exit(1);
}
flt *img = (flt *)nim->data;
flt *img2 = (flt *)nim2->data;
size_t differentVox = nim->nvox;
double sum = 0.0;
double sum2 = 0.0;
double maxDiff = 0.0;
size_t nNotNan = 0;
size_t nDifferent = 0;
for (size_t i = 0; i < nim->nvox; i++) {
if (!essentiallyEqual(img[i], img2[i])) {
if (fabs(img[i] - img2[i]) > maxDiff) {
differentVox = i;
maxDiff = fabs(img[i] - img2[i]);
}
nDifferent++;
}
if (isnan(img[i]) || isnan(img2[i]))
continue;
nNotNan++;
sum += img[i];
sum2 += img2[i];
}
if (differentVox >= nim->nvox) {
//printfx("Images essentially equal\n"); */
nifti_image_free(nim2);
exit(0);
}
//second pass - one pass correlation is inaccurate or slow
nNotNan = MAX(1, nNotNan);
flt mn = INFINITY; //do not set to item 1, in case it is nan
flt mx = -INFINITY;
flt sd = 0.0;
flt ave = sum / nNotNan;
flt mn2 = INFINITY;
flt mx2 = -INFINITY;
flt sd2 = 0.0;
flt ave2 = sum2 / nNotNan;
//two-pass standard deviation: sd = sqrt(sum(sqr(x - mean)) / (n - 1))
double sumDx = 0.0;
for (size_t i = 0; i < nim->nvox; i++) {
if (isnan(img[i]) || isnan(img2[i]))
continue;
mn = MIN(mn, img[i]);
mx = MAX(mx, img[i]);
sd += sqr(img[i] - ave);
mn2 = MIN(mn2, img2[i]);
mx2 = MAX(mx2, img2[i]);
sd2 += sqr(img2[i] - ave2);
sumDx += (img[i] - ave) * (img2[i] - ave2);
}
double r = 0.0;
nNotNan = MAX(2, nNotNan);
if (nim->nvox < 2) {
sd = 0.0;
sd2 = 0.0;
} else {
sd = sqrt(sd / (nNotNan - 1));
//if (sd != 0.0) sd = 1.0/sd;
sd2 = sqrt(sd2 / (nNotNan - 1));
//if (sd2 != 0.0) sd2 = 1.0/sd2;
if ((sd * sd2) != 0.0)
r = sumDx / (sd * sd2 * (nNotNan - 1));
//r = r / (nim->nvox - 1);
}
r = MIN(r, 1.0);
r = MAX(r, -1.0);
printfx("Images Differ: Correlation r = %g, identical voxels %d%%\n", r, (int)floor(100.0 * (1.0 - (double)nDifferent / (double)nim->nvox)));
if (nNotNan < nim->nvox) {
printfx(" %" PRId64 " voxels have a NaN in at least one image.\n", nim->nvox - nNotNan);
printfx(" Descriptives consider voxels that are numeric in both images.\n");
}
printfx(" Most different voxel %g vs %g (difference %g)\n", img[differentVox], img2[differentVox], maxDiff);
int nvox3D = nim->nx * nim->ny * MAX(nim->nz, 1);
int nVol = nim->nvox / nvox3D;
size_t vx[4];
vx[3] = differentVox / nvox3D;
vx[2] = (differentVox / (nim->nx * nim->ny)) % nim->nz;
vx[1] = (differentVox / nim->nx) % nim->ny;
vx[0] = differentVox % nim->nx;
printfx(" Most different voxel location %zux%zux%zu volume %zu\n", vx[0], vx[1], vx[2], vx[3]);
printfx("Image 1 Descriptives\n");
printfx(" Range: %g..%g Mean %g StDev %g\n", mn, mx, ave, sd);
printfx("Image 2 Descriptives\n");
printfx(" Range: %g..%g Mean %g StDev %g\n", mn2, mx2, ave2, sd2);
//V1 comparison - EXIT_SUCCESS if all vectors are parallel (for DWI up vector [1 0 0] has same direction as down [-1 0 0])
if (nVol != 3) {
nifti_image_free(nim2);
exit(1);
}
int allParallel = 1;
//niimath ft_V1 -compare nt_V1
for (size_t i = 0; i < nvox3D; i++) {
//check angle of two vectors... assume unit vectors
flt v[3]; //vector, image 1
v[0] = img[i];
v[1] = img[i + nvox3D];
v[2] = img[i + nvox3D + nvox3D];
flt v2[3]; //vector, image 2
v2[0] = img2[i];
v2[1] = img2[i + nvox3D];
v2[2] = img2[i + nvox3D + nvox3D];
flt x[3]; //cross product
x[0] = (v[1] * v2[2]) - (v[2] * v2[1]);
x[1] = (v[2] * v2[0]) - (v[0] * v2[2]);
x[2] = (v[0] * v2[1]) - (v[1] * v2[0]);
flt len = sqrt((x[0] * x[0]) + (x[1] * x[1]) + (x[2] * x[2]));
if (len > 0.01) {
allParallel = 0;
//printfx("[%g %g %g] vs [%g %g %g]\n", v[0],v[1], v[2], v2[0], v2[1], v2[2]);
break;
}
}
if (allParallel) {
printfx("Despite polarity differences, all vectors are parallel.\n");
nifti_image_free(nim2);
exit(0);
}
nifti_image_free(nim2);
exit(1);
} //nifti_compare()
#ifdef DT32
int main32X(int argc, char *argv[]) {
#else
int main64X(int argc, char *argv[]) {
#endif
char *fin = NULL, *fout = NULL;
//fslmaths in.nii out.nii changes datatype to flt, here we retain (similar to earlier versions of fslmaths)
//fslmaths in.nii -rem 10 out.nii uses integer modulus not fmod
//fslmaths robust range not fully described, this emulation is close
//fslmaths ing/inm are listed as "unary" but should be listed as binary
if (argc < 3) {
printfx("Fatal: show_help shown by wrapper function\n");
exit(1);
}
int dtCalc = DT_FLOAT32; //data type for calculation
int dtOut = DT_FLOAT32; //data type for calculation
int ac = 1;
// '-dt' sets datatype for calculations
if (!strcmp(argv[ac], "-dt")) {
if (!strcmp(argv[ac + 1], "double")) {
dtCalc = DT_FLOAT64;
} else if (strcmp(argv[ac + 1], "float")) {
printfx("'-dt' error: only float or double calculations supported\n");
return 1;
}
ac += 2;
if (argc < (ac + 2))
return 1; //insufficient arguments remain
}
//special case: pass through
// no calculation, simple pass through copy, e.g. "niimath in.nii out.nii.gz"
// note fslmaths would save as flt type... but lossless conversion in native format is faster
// note here we use nifti_image_read not nifti_image_read2 to preserve cal_min, cal_max
if (ac + 2 == argc) {
fin = argv[ac]; // no string copy, just pointer assignment
ac++;
nifti_image *nim = nifti_image_read(fin, 1);
if (!nim) {
printfx("** failed to read NIfTI image from '%s'\n", fin);
return 2;
}
fout = argv[ac]; // no string copy, just pointer assignment
ac++;
if (nifti_set_filenames(nim, fout, 0, 1))
return 1;
nifti_save(nim, ""); //nifti_image_write( nim );
nifti_image_free(nim);
return 0;
} //end pass through
// next argument is input file
fin = argv[ac]; // no string copy, just pointer assignment
ac++;
//clock_t startTime = clock();
nifti_image *nim = nifti_image_read2(fin, 1);
if (!nim) {
printfx("** failed to read NIfTI image from '%s'\n", fin);
return 2;
}
//printf("read time: %ld ms\n", timediff(startTime, clock()));
in_hdr ihdr = set_input_hdr(nim);
//check for "-odt" must be last couplet
if (!strcmp(argv[argc - 2], "-odt")) {
if (!strcmp(argv[argc - 1], "double")) {
dtOut = DT_FLOAT64;
} else if (!strcmp(argv[argc - 1], "float")) {
dtOut = DT_FLOAT32;
} else if (!strcmp(argv[argc - 1], "int")) {
dtOut = DT_INT32;
} else if (!strcmp(argv[argc - 1], "short")) {
dtOut = DT_INT16;
} else if (!strcmp(argv[argc - 1], "ushort")) {
dtOut = DT_UINT16;
} else if (!strcmp(argv[argc - 1], "char")) {
dtOut = DT_UINT8;
} else if (!strcmp(argv[argc - 1], "input")) {
dtOut = nim->datatype; //ihdr.datatype; //!
} else {
printfx("Error: Unknown datatype '%s' - Possible datatypes are: char short ushort int flt double input\n", argv[argc - 1]);
return 2;
}
argc = argc - 2;
} //odt
//convert data to calculation type (-dt)
if (nifti_image_change_datatype(nim, dtCalc, &ihdr) != 0)
return 1;
//check output filename, e.g does file exist
fout = argv[argc - 1]; // no string copy, just pointer assignment
if (nifti_set_filenames(nim, fout, 0, 1))
return 1;
argc = argc - 1;
#if defined(_OPENMP)
const int maxNumThreads = omp_get_max_threads();
const char *key = "AFNI_COMPRESSOR";
char *value;
value = getenv(key);
//export AFNI_COMPRESSOR=PIGZ
char pigzKey[5] = "PIGZ";
if ((value != NULL) && (strstr(value, pigzKey))) {
omp_set_num_threads(maxNumThreads);
printfx("Using %d threads\n", maxNumThreads);
} else {
omp_set_num_threads(1);
printfx("Single threaded\n");
}
#endif
//read operations
int nkernel = 0; //number of voxels in kernel
int *kernel = make_kernel(nim, &nkernel, 3, 3, 3);
char *end;
int ok = 0;
while (ac < argc) {
enum eOp op = unknown;
if (!strcmp(argv[ac], "-add"))
op = add;
if (!strcmp(argv[ac], "-sub"))
op = sub;
if (!strcmp(argv[ac], "-mul"))
op = mul;
if (!strcmp(argv[ac], "-div"))
op = divX;
if (!strcmp(argv[ac], "-rem"))
op = rem;
if (!strcmp(argv[ac], "-mod"))
op = mod;
if (!strcmp(argv[ac], "-mas"))
op = mas;
if (!strcmp(argv[ac], "-thr"))
op = thr;
if (!strcmp(argv[ac], "-thrp"))
op = thrp;
if (!strcmp(argv[ac], "-thrP"))
op = thrP;
if (!strcmp(argv[ac], "-uthr"))
op = uthr;
if (!strcmp(argv[ac], "-uthrp"))
op = uthrp;
if (!strcmp(argv[ac], "-uthrP"))
op = uthrP;
if (!strcmp(argv[ac], "-clamp"))
op = clamp;
if (!strcmp(argv[ac], "-uclamp"))
op = uclamp;
if (!strcmp(argv[ac], "-max"))
op = max;
if (!strcmp(argv[ac], "-min"))
op = min;
if (!strcmp(argv[ac], "-max"))
op = max;
//if ( ! strcmp(argv[ac], "-addtozero") ) op = addtozero; //variation of mas
//if ( ! strcmp(argv[ac], "-overadd") ) op = overadd; //variation of mas
if (!strcmp(argv[ac], "-power"))
op = power;
if (!strcmp(argv[ac], "-seed"))
op = seed;
//if ( ! strcmp(argv[ac], "-restart") ) op = restart;
//if ( ! strcmp(argv[ac], "-save") ) op = save;
if (!strcmp(argv[ac], "-inm"))
op = inm;
if (!strcmp(argv[ac], "-ing"))
op = ing;
if (!strcmp(argv[ac], "-s"))
op = smth;
if (!strcmp(argv[ac], "-exp"))
op = exp1;
if (!strcmp(argv[ac], "-ceil"))
op = ceil1;
if (!strcmp(argv[ac], "-round"))
op = round1;
if (!strcmp(argv[ac], "-floor"))
op = floor1;
if (!strcmp(argv[ac], "-trunc"))
op = trunc1;
if (!strcmp(argv[ac], "-log"))
op = log1;
if (!strcmp(argv[ac], "-sin"))
op = sin1;
if (!strcmp(argv[ac], "-cos"))
op = cos1;
if (!strcmp(argv[ac], "-tan"))
op = tan1;
if (!strcmp(argv[ac], "-asin"))
op = asin1;
if (!strcmp(argv[ac], "-acos"))
op = acos1;
if (!strcmp(argv[ac], "-atan"))
op = atan1;
if (!strcmp(argv[ac], "-sqr"))
op = sqr1;
if (!strcmp(argv[ac], "-sqrt"))
op = sqrt1;
if (!strcmp(argv[ac], "-recip"))
op = recip1;
if (!strcmp(argv[ac], "-abs"))
op = abs1;
if (!strcmp(argv[ac], "-bin"))
op = bin1;
if (!strcmp(argv[ac], "-binv"))
op = binv1;
if (!strcmp(argv[ac], "-edge"))
op = edge1;
if (!strcmp(argv[ac], "-index"))
op = index1;
if (!strcmp(argv[ac], "-nan"))
op = nan1;
if (!strcmp(argv[ac], "-nanm"))
op = nanm1;
if (!strcmp(argv[ac], "-rand"))
op = rand1;
if (!strcmp(argv[ac], "-randn"))
op = randn1;
if (!strcmp(argv[ac], "-range"))
op = range1;
if (!strcmp(argv[ac], "-rank"))
op = rank1;
if (!strcmp(argv[ac], "-ranknorm"))
op = ranknorm1;
if (!strcmp(argv[ac], "-ztop"))
op = ztop1;
if (!strcmp(argv[ac], "-ptoz"))
op = ptoz1;
if (!strcmp(argv[ac], "-pval"))
op = pval1;
if (!strcmp(argv[ac], "-pval0"))
op = pval01;
if (!strcmp(argv[ac], "-cpval"))
op = cpval1;
//kernel operations
if (!strcmp(argv[ac], "-dilM"))
op = dilMk;
if (!strcmp(argv[ac], "-dilD"))
op = dilDk;
if (!strcmp(argv[ac], "-dilF"))
op = dilFk;
if (!strcmp(argv[ac], "-dilall"))
op = dilallk;
if (!strcmp(argv[ac], "-ero"))
op = erok;
if (!strcmp(argv[ac], "-eroF"))
op = eroFk;
if (!strcmp(argv[ac], "-fmedian"))
op = fmediank;
if (!strcmp(argv[ac], "-fmean"))
op = fmeank;
if (!strcmp(argv[ac], "-fmeanu"))
op = fmeanuk;
if (!strcmp(argv[ac], "-p")) {
ac++;
#if defined(_OPENMP)
int nProcessors = atoi(argv[ac]);
if (nProcessors < 1) {
omp_set_num_threads(maxNumThreads);
printfx("Using %d threads\n", maxNumThreads);
} else {
omp_set_num_threads(nProcessors);
printfx("Using %d threads\n", nProcessors);
}
#else
printfx("Warning: not compiled for OpenMP: '-p' ignored\n");
#endif
} else
//All dimensionality-reduction operation names begin with a capital letter; no other commands do!
if ((strlen(argv[ac]) > 4) && (argv[ac][0] == '-') && (isupper(argv[ac][1]))) {
int dim = 0;
switch (argv[ac][1]) {
case 'X':
dim = 1;
break;
case 'Y':
dim = 2;
break;
case 'Z':
dim = 3;
break;
case 'T':
dim = 4;
break;
}
if (dim == 0) {
printfx("Error: unknown dimensionality reduction operation: %s\n", argv[ac]);
goto fail;
}
if (strstr(argv[ac], "mean"))
ok = nifti_dim_reduce(nim, Tmean, dim, 0);
else if (strstr(argv[ac], "std"))
ok = nifti_dim_reduce(nim, Tstd, dim, 0);
else if (strstr(argv[ac], "maxn"))
ok = nifti_dim_reduce(nim, Tmaxn, dim, 0); //test maxn BEFORE max
else if (strstr(argv[ac], "max"))
ok = nifti_dim_reduce(nim, Tmax, dim, 0);
else if (strstr(argv[ac], "min"))
ok = nifti_dim_reduce(nim, Tmin, dim, 0);
else if (strstr(argv[ac], "median"))
ok = nifti_dim_reduce(nim, Tmedian, dim, 0);
else if (strstr(argv[ac], "perc")) {
ac++;
int pct = atoi(argv[ac]);
ok = nifti_dim_reduce(nim, Tperc, dim, pct);
} else if (strstr(argv[ac], "ar1"))
ok = nifti_dim_reduce(nim, Tar1, dim, 0);
else {
printfx("Error unknown dimensionality reduction operation: %s\n", argv[ac]);
ok = 1;
}
} else if (!strcmp(argv[ac], "-roi")) {
//-roi <xmin> <xsize> <ymin> <ysize> <zmin> <zsize> <tmin> <tsize>
if ((argc - ac) < 8) {
printfx("not enough arguments for '-roi'\n"); //start.size for 4 dimensions: user might forget volumes
goto fail;
}
ac++;
int xmin = atoi(argv[ac]);
ac++;
int xsize = atoi(argv[ac]);
ac++;
int ymin = atoi(argv[ac]);
ac++;
int ysize = atoi(argv[ac]);
ac++;
int zmin = atoi(argv[ac]);
ac++;
int zsize = atoi(argv[ac]);
ac++;
int tmin = atoi(argv[ac]);
ac++;
int tsize = atoi(argv[ac]);
nifti_roi(nim, xmin, xsize, ymin, ysize, zmin, zsize, tmin, tsize);
} else if (!strcmp(argv[ac], "-bptfm")) {
ac++;
double hp_sigma = strtod(argv[ac], &end);
ac++;
double lp_sigma = strtod(argv[ac], &end);
ok = nifti_bptf(nim, hp_sigma, lp_sigma, 0);
} else if (!strcmp(argv[ac], "-bptf")) {
ac++;
double hp_sigma = strtod(argv[ac], &end);
ac++;
double lp_sigma = strtod(argv[ac], &end);
//ok = nifti_bptf(nim, hp_sigma, lp_sigma);
ok = nifti_bptf(nim, hp_sigma, lp_sigma, 1);
#ifdef bandpass
} else if (!strcmp(argv[ac], "-bandpass")) {
// niimath test4D -bandpass 0.08 0.008 0 c
ac++;
double lp_hz = strtod(argv[ac], &end);
ac++;
double hp_hz = strtod(argv[ac], &end);
ac++;
double TRsec = strtod(argv[ac], &end);
ok = nifti_bandpass(nim, lp_hz, hp_hz, TRsec);
#endif
} else if (!strcmp(argv[ac], "-roc")) {
//-roc <AROC-thresh> <outfile> [4Dnoiseonly] <truth>
//a negative <AROC-thresh> signals that the optional [4Dnoiseonly] image is omitted
ac++;
double thresh = strtod(argv[ac], &end);
ac++;
int outfile = ac;
char *fnoise = NULL;
if (thresh > 0.0) {
ac++;
fnoise = argv[ac];
}
ac++;
int truth = ac;
//ok = nifti_bptf(nim, hp_sigma, lp_sigma);
ok = nifti_roc(nim, fabs(thresh), argv[outfile], fnoise, argv[truth]);
if (ac >= argc) {
printfx("Error: no output filename specified!\n"); //e.g. volume size might differ
goto fail;
}
} else if (!strcmp(argv[ac], "-unsharp")) {
ac++;
double sigma = strtod(argv[ac], &end);
ac++;
double amount = strtod(argv[ac], &end);
nifti_unsharp(nim, sigma, sigma, sigma, amount);
} else if (strstr(argv[ac], "-otsu")) {
ac ++;
int mode = atoi(argv[ac]);
ok = nifti_otsu(nim, mode, 1);
} else if (strstr(argv[ac], "-dehaze")) {
ac ++;
int mode = atoi(argv[ac]);
int zeroFill = 0;
if (mode < 0) zeroFill = -1;
mode = abs(mode);
ok = nifti_otsu(nim, mode, zeroFill);
#ifdef bwlabelx
} else if (strstr(argv[ac], "-bwlabel")) {
ac ++;
int conn = atoi(argv[ac]);
ok = bwlabel(nim, conn);
#endif
} else if (!strcmp(argv[ac], "-h2c"))
ok = nifti_h2c(nim);
else if (!strcmp(argv[ac], "-c2h"))
ok = nifti_c2h(nim);
else if (!strcmp(argv[ac], "-subsamp2"))
ok = nifti_subsamp2(nim, 0);
else if (!strcmp(argv[ac], "-subsamp2offc"))
ok = nifti_subsamp2(nim, 1);
else if (!strcmp(argv[ac], "-sobel_binary"))
ok = nifti_sobel(nim, 1, 1);
else if (!strcmp(argv[ac], "-sobel"))
ok = nifti_sobel(nim, 1, 0);
else if (!strcmp(argv[ac], "-demean"))
ok = nifti_demean(nim);
else if (!strcmp(argv[ac], "-detrend"))
ok = nifti_detrend_linear(nim);
else if (!strcmp(argv[ac], "-resize")) {
ac++;
double X = strtod(argv[ac], &end);
ac++;
double Y = strtod(argv[ac], &end);
ac++;
double Z = strtod(argv[ac], &end);
ac++;
int interp_method = atoi(argv[ac]);
ok = nifti_resize(nim, X, Y, Z, interp_method);
} else if (!strcmp(argv[ac], "-crop")) {
ac++;
int tmin = atoi(argv[ac]);
ac++;
int tsize = atoi(argv[ac]);
ok = nifti_crop(nim, tmin, tsize);
} else if (!strcmp(argv[ac], "--compare")) { //--function terminates without saving image
ac++;
nifti_compare(nim, argv[ac]); //always terminates
} else if (!strcmp(argv[ac], "-edt"))
ok = nifti_edt(nim);
else if (!strcmp(argv[ac], "-fillh"))
ok = nifti_fillh(nim, 0);
else if (!strcmp(argv[ac], "-fillh26"))
ok = nifti_fillh(nim, 1);
else if (!strcmp(argv[ac], "-kernel")) {
ac++;
if (kernel != NULL)
_mm_free(kernel);
kernel = NULL;
if (!strcmp(argv[ac], "3D"))
kernel = make_kernel(nim, &nkernel, 3, 3, 3);
if (!strcmp(argv[ac], "2D"))
kernel = make_kernel(nim, &nkernel, 3, 3, 1);
if (!strcmp(argv[ac], "boxv")) {
ac++;
int vx = atoi(argv[ac]);
kernel = make_kernel(nim, &nkernel, vx, vx, vx);
}
if (!strcmp(argv[ac], "sphere")) {
ac++;
double mm = strtod(argv[ac], &end);
kernel = make_kernel_sphere(nim, &nkernel, mm);
}
if (!strcmp(argv[ac], "file")) {
ac++;
kernel = make_kernel_file(nim, &nkernel, argv[ac]);
}
if (!strcmp(argv[ac], "gauss")) {
ac++;
double mm = strtod(argv[ac], &end);
kernel = make_kernel_gauss(nim, &nkernel, mm);
}
if (!strcmp(argv[ac], "box")) { //all voxels in a cube of width <size> mm centered on target voxel");
ac++;
double mm = strtod(argv[ac], &end);
int vx = (2 * floor(mm / nim->dx)) + 1;
int vy = (2 * floor(mm / nim->dy)) + 1;
int vz = (2 * floor(mm / nim->dz)) + 1;
kernel = make_kernel(nim, &nkernel, vx, vy, vz);
}
if (!strcmp(argv[ac], "boxv3")) {
ac++;
int vx = atoi(argv[ac]);
ac++;
int vy = atoi(argv[ac]);
ac++;
int vz = atoi(argv[ac]);
kernel = make_kernel(nim, &nkernel, vx, vy, vz);
}
if (kernel == NULL) {
printfx("Error: '-kernel' option failed.\n"); //e.g. volume size might differ
ok = 1;
}
} else if (!strcmp(argv[ac], "-tensor_2lower")) {
ok = nifti_tensor_2(nim, 0);
} else if (!strcmp(argv[ac], "-tensor_2upper")) {
ok = nifti_tensor_2(nim, 1);
} else if (!strcmp(argv[ac], "-tensor_decomp")) {
ok = nifti_tensor_decomp(nim, 1);
} else if (!strcmp(argv[ac], "-tensor_decomp_lower")) {
ok = nifti_tensor_decomp(nim, 0);
} else if (!strcmp(argv[ac], "-save")) {
ac++;
char *fout2 = argv[ac];
if (nifti_set_filenames(nim, fout2, 1, 1))
ok = 1;
else {
nifti_save(nim, ""); //nifti_image_write( nim );
nifti_set_filenames(nim, fout, 1, 1);
}
} else if (!strcmp(argv[ac], "-restart")) {
if (kernel != NULL)
printfx("Warning: 'restart' resets the kernel\n"); //e.g. volume size might differ
nifti_image_free(nim);
if (kernel != NULL)
_mm_free(kernel);
ac++;
nim = nifti_image_read(argv[ac], 1);
if (!nim)
ok = 1; //error
else
kernel = make_kernel(nim, &nkernel, 3, 3, 3); //rebuild default kernel for the new image
} else if (!strcmp(argv[ac], "-grid")) {
ac++;
double v = strtod(argv[ac], &end);
ac++;
int s = atoi(argv[ac]);
ok = nifti_grid(nim, v, s);
//} else if (!strcmp(argv[ac], "-dog")) {
} else if (strstr(argv[ac], "-dog")) {
int orient = 0;
if (strstr(argv[ac], "-dogx")) orient = 1;
if (strstr(argv[ac], "-dogy")) orient = 2;
if (strstr(argv[ac], "-dogz")) orient = 3;
if (strstr(argv[ac], "-dogr")) orient = -1;
ac++;
double pos = strtod(argv[ac], &end);
ac++;
double neg = strtod(argv[ac], &end);
ok = nifti_dog(nim, pos, neg, orient);
} else if (!strcmp(argv[ac], "-tfce")) {
ac++;
double H = strtod(argv[ac], &end);
ac++;
double E = strtod(argv[ac], &end);
ac++;
int c = atoi(argv[ac]);
ok = nifti_tfce(nim, H, E, c);
} else if (!strcmp(argv[ac], "-tfceS")) {
ac++;
double H = strtod(argv[ac], &end);
ac++;
double E = strtod(argv[ac], &end);
ac++;
int c = atoi(argv[ac]);
ac++;
int x = atoi(argv[ac]);
ac++;
int y = atoi(argv[ac]);
ac++;
int z = atoi(argv[ac]);
ac++;
double tfce_thresh = strtod(argv[ac], &end);
ok = nifti_tfceS(nim, H, E, c, x, y, z, tfce_thresh);
} else if (op == unknown) {
printfx("!!Error: unsupported operation '%s'\n", argv[ac]);
goto fail;
}
if ((op >= dilMk) && (op <= fmeanuk))
ok = nifti_kernel(nim, op, kernel, nkernel);
if ((op >= exp1) && (op <= ptoz1))
nifti_unary(nim, op);
if ((op >= add) && (op < exp1)) { //binary operations
ac++;
double v = strtod(argv[ac], &end);
if (strlen(argv[ac]) != (size_t)(end - argv[ac])) { //e.g. strtod("4d") parses "4" but leaves "d": treat the argument as a filename, not a number
if ((op == power) || (op == clamp) || (op == uclamp) || (op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP) || (op == seed)) {
printfx("Error: '%s' expects numeric value\n", argv[ac - 1]);
goto fail;
} else
ok = nifti_binary(nim, argv[ac], op);
} else {
if (op == add)
ok = nifti_rescale(nim, 1.0, v);
if (op == sub)
ok = nifti_rescale(nim, 1.0, -v);
if (op == mul)
ok = nifti_rescale(nim, v, 0.0);
if (op == divX)
ok = nifti_rescale(nim, 1.0 / v, 0.0);
if (op == mod)
ok = nifti_rem(nim, v, 1);
if (op == rem)
ok = nifti_rem(nim, v, 0);
if (op == mas) {
printfx("Error: -mas expects image not number\n");
goto fail;
}
if (op == power)
ok = nifti_binary_power(nim, v);
if (op == thr)
ok = nifti_thr(nim, v, 0, 0.0);
if ((op == clamp) || (op == uclamp) || (op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP))
ok = nifti_thrp(nim, v, op);
if (op == uthr)
ok = nifti_thr(nim, v, 1, 0.0);
if (op == max)
ok = nifti_max(nim, v, 0);
if (op == min)
ok = nifti_max(nim, v, 1);
if (op == inm)
ok = nifti_inm(nim, v);
if (op == ing)
ok = nifti_ing(nim, v);
if (op == smth)
ok = nifti_smooth_gauss(nim, v, v, v, -6.0);
if (op == seed) {
if ((v > 0) && (v < 1))
v *= RAND_MAX;
srand((unsigned)fabs(v));
}
}
} //binary operations
if (ok != 0)
goto fail;
ac++;
}
//convert data to output type (-odt)
if (nifti_image_change_datatype(nim, dtOut, &ihdr) != 0)
return 1;
// if we get here, write the output dataset
nifti_save(nim, ""); //nifti_image_write( nim );
// and clean up memory
nifti_image_free(nim);
if (kernel != NULL)
_mm_free(kernel);
return 0;
fail:
nifti_image_free(nim);
if (kernel != NULL)
_mm_free(kernel);
return 1;
} //main()
#endif // #ifndef USING_WASM
#ifndef USING_WASM
#ifdef DT32
int main32(int argc, char *argv[]) {
#else
int main64(int argc, char *argv[]) {
#endif
char *fin = NULL, *fout = NULL;
//fslmaths in.nii out.nii changes datatype to flt, here we retain (similar to earlier versions of fslmaths)
//fslmaths in.nii -rem 10 out.nii uses integer modulus not fmod
//fslmaths robust range not fully described, this emulation is close
//fslmaths ing/inm are listed as "unary" but should be listed as binary
if (argc < 3) {
printfx("Fatal: show_help shown by wrapper function\n");
exit(1);
}
int dtCalc = DT_FLOAT32; //data type for calculation
int dtOut = DT_FLOAT32; //data type for output (-odt)
int ac = 1;
// '-dt' sets datatype for calculations
if (!strcmp(argv[ac], "-dt")) {
if (!strcmp(argv[ac + 1], "double")) {
dtCalc = DT_FLOAT64;
} else if (strcmp(argv[ac + 1], "float")) {
printfx("'-dt' error: only float or double calculations supported\n");
return 1;
}
ac += 2;
if (argc < (ac + 2))
return 1; //insufficient arguments remain
}
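// Example invocation (illustrative filenames): "niimath -dt double in.nii -sqrt out.nii -odt short"
// computes in 64-bit float ('-dt' must be the first option) and saves the result
// as int16 ('-odt' must be the final option pair; see the check further below).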
//special case: pass through
// no calculation, simple pass through copy, e.g. "niimaths in.nii out.nii.gz"
// note fslmaths would save as flt type... but lossless conversion in native format is faster
// note here we use nifti_image_read not nifti_image_read2 to preserve cal_min, cal_max
if (ac + 2 == argc) {
fin = argv[ac]; // no string copy, just pointer assignment
ac++;
nifti_image *nim = nifti_image_read(fin, 1);
if (!nim) {
printfx("** failed to read NIfTI image from '%s'\n", fin);
return 2;
}
fout = argv[ac]; // no string copy, just pointer assignment
ac++;
if (nifti_set_filenames(nim, fout, 0, 1))
return 1;
nifti_save(nim, ""); //nifti_image_write( nim );
nifti_image_free(nim);
return 0;
} //end pass through
// next argument is input file
fin = argv[ac]; // no string copy, just pointer assignment
ac++;
//clock_t startTime = clock();
nifti_image *nim = nifti_image_read2(fin, 1);
if (!nim) {
printfx("** failed to read NIfTI image from '%s'\n", fin);
return 2;
}
//printf("read time: %ld ms\n", timediff(startTime, clock()));
in_hdr ihdr = set_input_hdr(nim);
//check for "-odt" must be last couplet
if (!strcmp(argv[argc - 2], "-odt")) {
if (!strcmp(argv[argc - 1], "double")) {
dtOut = DT_FLOAT64;
} else if (!strcmp(argv[argc - 1], "flt")) {
dtOut = DT_FLOAT32;
} else if (!strcmp(argv[argc - 1], "int")) {
dtOut = DT_INT32;
} else if (!strcmp(argv[argc - 1], "short")) {
dtOut = DT_INT16;
} else if (!strcmp(argv[argc - 1], "ushort")) {
dtOut = DT_UINT16;
} else if (!strcmp(argv[argc - 1], "char")) {
dtOut = DT_UINT8;
} else if (!strcmp(argv[argc - 1], "input")) {
dtOut = nim->datatype; //ihdr.datatype; //!
} else {
printfx("Error: Unknown datatype '%s' - Possible datatypes are: char short ushort int flt double input\n", argv[argc - 1]);
return 2;
}
argc = argc - 2;
} //odt
//convert data to calculation type (-dt)
if (nifti_image_change_datatype(nim, dtCalc, &ihdr) != 0)
return 1;
//check output filename, e.g does file exist
fout = argv[argc - 1]; // no string copy, just pointer assignment
if (nifti_set_filenames(nim, fout, 0, 1))
return 1;
argc = argc - 1;
#if defined(_OPENMP)
const int maxNumThreads = omp_get_max_threads();
const char *key = "AFNI_COMPRESSOR";
char *value;
value = getenv(key);
//export AFNI_COMPRESSOR=PIGZ
char pigzKey[5] = "PIGZ";
if ((value != NULL) && (strstr(value, pigzKey))) {
omp_set_num_threads(maxNumThreads);
printfx("Using %d threads\n", maxNumThreads);
} else {
omp_set_num_threads(1);
printfx("Single threaded\n");
}
#endif
#else
int mainWASM(nifti_image *nim, int argc, char *argv[]) {
int ac = 0;
#endif
//read operations
int nkernel = 0; //number of voxels in kernel
int *kernel = make_kernel(nim, &nkernel, 3, 3, 3);
char *end = NULL;
int ok = 0;
while (ac < argc) {
enum eOp op = unknown;
if (!strcmp(argv[ac], "-add"))
op = add;
if (!strcmp(argv[ac], "-sub"))
op = sub;
if (!strcmp(argv[ac], "-mul"))
op = mul;
if (!strcmp(argv[ac], "-div"))
op = divX;
if (!strcmp(argv[ac], "-rem"))
op = rem;
if (!strcmp(argv[ac], "-mod"))
op = mod;
if (!strcmp(argv[ac], "-mas"))
op = mas;
if (!strcmp(argv[ac], "-thr"))
op = thr;
if (!strcmp(argv[ac], "-thrp"))
op = thrp;
if (!strcmp(argv[ac], "-thrP"))
op = thrP;
if (!strcmp(argv[ac], "-uthr"))
op = uthr;
if (!strcmp(argv[ac], "-uthrp"))
op = uthrp;
if (!strcmp(argv[ac], "-uthrP"))
op = uthrP;
if (!strcmp(argv[ac], "-clamp"))
op = clamp;
if (!strcmp(argv[ac], "-uclamp"))
op = uclamp;
if (!strcmp(argv[ac], "-max"))
op = max;
if (!strcmp(argv[ac], "-min"))
op = min;
//if ( ! strcmp(argv[ac], "-addtozero") ) op = addtozero; //variation of mas
//if ( ! strcmp(argv[ac], "-overadd") ) op = overadd; //variation of mas
if (!strcmp(argv[ac], "-power"))
op = power;
if (!strcmp(argv[ac], "-seed"))
op = seed;
//if ( ! strcmp(argv[ac], "-restart") ) op = restart;
//if ( ! strcmp(argv[ac], "-save") ) op = save;
if (!strcmp(argv[ac], "-inm"))
op = inm;
if (!strcmp(argv[ac], "-ing"))
op = ing;
if (!strcmp(argv[ac], "-s"))
op = smth;
if (!strcmp(argv[ac], "-exp"))
op = exp1;
if (!strcmp(argv[ac], "-ceil"))
op = ceil1;
if (!strcmp(argv[ac], "-round"))
op = ceil1;
if (!strcmp(argv[ac], "-floor"))
op = floor1;
if (!strcmp(argv[ac], "-trunc"))
op = trunc1;
if (!strcmp(argv[ac], "-log"))
op = log1;
if (!strcmp(argv[ac], "-sin"))
op = sin1;
if (!strcmp(argv[ac], "-cos"))
op = cos1;
if (!strcmp(argv[ac], "-tan"))
op = tan1;
if (!strcmp(argv[ac], "-asin"))
op = asin1;
if (!strcmp(argv[ac], "-acos"))
op = acos1;
if (!strcmp(argv[ac], "-atan"))
op = atan1;
if (!strcmp(argv[ac], "-sqr"))
op = sqr1;
if (!strcmp(argv[ac], "-sqrt"))
op = sqrt1;
if (!strcmp(argv[ac], "-recip"))
op = recip1;
if (!strcmp(argv[ac], "-abs"))
op = abs1;
if (!strcmp(argv[ac], "-bin"))
op = bin1;
if (!strcmp(argv[ac], "-binv"))
op = binv1;
if (!strcmp(argv[ac], "-edge"))
op = edge1;
if (!strcmp(argv[ac], "-index"))
op = index1;
if (!strcmp(argv[ac], "-nan"))
op = nan1;
if (!strcmp(argv[ac], "-nanm"))
op = nanm1;
if (!strcmp(argv[ac], "-rand"))
op = rand1;
if (!strcmp(argv[ac], "-randn"))
op = randn1;
if (!strcmp(argv[ac], "-range"))
op = range1;
if (!strcmp(argv[ac], "-rank"))
op = rank1;
if (!strcmp(argv[ac], "-ranknorm"))
op = ranknorm1;
if (!strcmp(argv[ac], "-ztop"))
op = ztop1;
if (!strcmp(argv[ac], "-ptoz"))
op = ptoz1;
if (!strcmp(argv[ac], "-pval"))
op = pval1;
if (!strcmp(argv[ac], "-pval0"))
op = pval01;
if (!strcmp(argv[ac], "-cpval"))
op = cpval1;
//kernel operations
if (!strcmp(argv[ac], "-dilM"))
op = dilMk;
if (!strcmp(argv[ac], "-dilD"))
op = dilDk;
if (!strcmp(argv[ac], "-dilF"))
op = dilFk;
if (!strcmp(argv[ac], "-dilall"))
op = dilallk;
if (!strcmp(argv[ac], "-ero"))
op = erok;
if (!strcmp(argv[ac], "-eroF"))
op = eroFk;
if (!strcmp(argv[ac], "-fmedian"))
op = fmediank;
if (!strcmp(argv[ac], "-fmean"))
op = fmeank;
if (!strcmp(argv[ac], "-fmeanu"))
op = fmeanuk;
if (!strcmp(argv[ac], "-p")) {
ac++;
#if defined(_OPENMP)
int nProcessors = atoi(argv[ac]);
if (nProcessors < 1) {
omp_set_num_threads(maxNumThreads);
printfx("Using %d threads\n", maxNumThreads);
} else {
omp_set_num_threads(nProcessors);
printfx("Using %d threads\n", nProcessors);
}
#else
printfx("Warning: not compiled for OpenMP: '-p' ignored\n");
#endif
} else if ((strlen(argv[ac]) > 4) && (argv[ac][0] == '-') && (isupper(argv[ac][1]))) { //isupper
#ifndef USING_WASM //WASM does not (yet) adjust image size
//All Dimensionality reduction operations names begin with Capital letter, no other commands do!
int dim = 0;
switch (argv[ac][1]) {
case 'X':
dim = 1;
break;
case 'Y':
dim = 2;
break;
case 'Z':
dim = 3;
break;
case 'T':
dim = 4;
break;
}
if (dim == 0) {
printfx("Error: unknown dimensionality reduction operation: %s\n", argv[ac]);
goto fail;
}
if (strstr(argv[ac], "mean"))
ok = nifti_dim_reduce(nim, Tmean, dim, 0);
else if (strstr(argv[ac], "std"))
ok = nifti_dim_reduce(nim, Tstd, dim, 0);
else if (strstr(argv[ac], "maxn"))
ok = nifti_dim_reduce(nim, Tmaxn, dim, 0); //test maxn BEFORE max
else if (strstr(argv[ac], "max"))
ok = nifti_dim_reduce(nim, Tmax, dim, 0);
else if (strstr(argv[ac], "min"))
ok = nifti_dim_reduce(nim, Tmin, dim, 0);
else if (strstr(argv[ac], "median"))
ok = nifti_dim_reduce(nim, Tmedian, dim, 0);
else if (strstr(argv[ac], "perc")) {
ac++;
int pct = atoi(argv[ac]);
ok = nifti_dim_reduce(nim, Tperc, dim, pct);
} else if (strstr(argv[ac], "ar1"))
ok = nifti_dim_reduce(nim, Tar1, dim, 0);
else {
printfx("Error unknown dimensionality reduction operation: %s\n", argv[ac]);
ok = 1;
}
#endif //WASM does not (yet) adjust image size
} else if (!strcmp(argv[ac], "-roi")) {
//int , int , int , int , int , int , int , int )
if ((argc - ac) < 8) {
printfx("not enough arguments for '-roi'\n"); //start.size for 4 dimensions: user might forget volumes
goto fail;
}
ac++;
int xmin = atoi(argv[ac]);
ac++;
int xsize = atoi(argv[ac]);
ac++;
int ymin = atoi(argv[ac]);
ac++;
int ysize = atoi(argv[ac]);
ac++;
int zmin = atoi(argv[ac]);
ac++;
int zsize = atoi(argv[ac]);
ac++;
int tmin = atoi(argv[ac]);
ac++;
int tsize = atoi(argv[ac]);
nifti_roi(nim, xmin, xsize, ymin, ysize, zmin, zsize, tmin, tsize);
} else if (!strcmp(argv[ac], "-bptfm")) {
ac++;
double hp_sigma = strtod(argv[ac], &end);
ac++;
double lp_sigma = strtod(argv[ac], &end);
ok = nifti_bptf(nim, hp_sigma, lp_sigma, 0);
} else if (!strcmp(argv[ac], "-bptf")) {
ac++;
double hp_sigma = strtod(argv[ac], &end);
ac++;
double lp_sigma = strtod(argv[ac], &end);
//ok = nifti_bptf(nim, hp_sigma, lp_sigma);
ok = nifti_bptf(nim, hp_sigma, lp_sigma, 1);
#ifdef bandpass
} else if (!strcmp(argv[ac], "-bandpass")) {
// niimath test4D -bandpass 0.08 0.008 0 c
ac++;
double lp_hz = strtod(argv[ac], &end);
ac++;
double hp_hz = strtod(argv[ac], &end);
ac++;
double TRsec = strtod(argv[ac], &end);
ok = nifti_bandpass(nim, lp_hz, hp_hz, TRsec);
#endif
} else if (!strcmp(argv[ac], "-roc")) {
#ifndef USING_WASM //WASM does not (yet) support Area-under-ROC
//-roc <AROC-thresh> <outfile> [4Dnoiseonly] <truth>
ac++;
double thresh = strtod(argv[ac], &end);
ac++;
int outfile = ac;
char *fnoise = NULL;
if (thresh > 0.0) {
ac++;
fnoise = argv[ac];
}
ac++;
int truth = ac;
if (truth >= argc) { //validate the argument count BEFORE reading argv[truth]
printfx("Error: no output filename specified!\n");
goto fail;
}
ok = nifti_roc(nim, fabs(thresh), argv[outfile], fnoise, argv[truth]);
#endif //WASM does not (yet) support Area-under-ROC
} else if (!strcmp(argv[ac], "-unsharp")) {
ac++;
double sigma = strtod(argv[ac], &end);
ac++;
double amount = strtod(argv[ac], &end);
nifti_unsharp(nim, sigma, sigma, sigma, amount);
} else if (strstr(argv[ac], "-otsu")) {
ac ++;
int mode = atoi(argv[ac]);
ok = nifti_otsu(nim, mode, 1);
} else if (strstr(argv[ac], "-dehaze")) {
ac ++;
int mode = atoi(argv[ac]);
int zeroFill = 0;
if (mode < 0) zeroFill = -1;
mode = abs(mode);
ok = nifti_otsu(nim, mode, zeroFill);
#ifdef bwlabelx
} else if (strstr(argv[ac], "-bwlabel")) {
ac ++;
int conn = atoi(argv[ac]);
ok = bwlabel(nim, conn);
#endif
} else if (!strcmp(argv[ac], "-h2c"))
ok = nifti_h2c(nim);
else if (!strcmp(argv[ac], "-c2h"))
ok = nifti_c2h(nim);
else if (!strcmp(argv[ac], "-sobel_binary"))
ok = nifti_sobel(nim, 1, 1);
else if (!strcmp(argv[ac], "-sobel"))
ok = nifti_sobel(nim, 1, 0);
else if (!strcmp(argv[ac], "-demean"))
ok = nifti_demean(nim);
else if (!strcmp(argv[ac], "-detrend"))
ok = nifti_detrend_linear(nim);
#ifndef USING_WASM //WASM does not (yet) resize images
else if (!strcmp(argv[ac], "-subsamp2"))
ok = nifti_subsamp2(nim, 0);
else if (!strcmp(argv[ac], "-subsamp2offc"))
ok = nifti_subsamp2(nim, 1);
else if (!strcmp(argv[ac], "-resize")) {
ac++;
double X = strtod(argv[ac], &end);
ac++;
double Y = strtod(argv[ac], &end);
ac++;
double Z = strtod(argv[ac], &end);
ac++;
int interp_method = atoi(argv[ac]);
ok = nifti_resize(nim, X, Y, Z, interp_method);
} else if (!strcmp(argv[ac], "-crop")) {
ac++;
int tmin = atoi(argv[ac]);
ac++;
int tsize = atoi(argv[ac]);
ok = nifti_crop(nim, tmin, tsize);
} else if (!strcmp(argv[ac], "--compare")) { //--function terminates without saving image
ac++;
nifti_compare(nim, argv[ac]); //always terminates
}
#endif //WASM does not (yet) resize images
else if (!strcmp(argv[ac], "-edt"))
ok = nifti_edt(nim);
else if (!strcmp(argv[ac], "-fillh"))
ok = nifti_fillh(nim, 0);
else if (!strcmp(argv[ac], "-fillh26"))
ok = nifti_fillh(nim, 1);
else if (!strcmp(argv[ac], "-kernel")) {
ac++;
if (kernel != NULL)
_mm_free(kernel);
kernel = NULL;
if (!strcmp(argv[ac], "3D"))
kernel = make_kernel(nim, &nkernel, 3, 3, 3);
if (!strcmp(argv[ac], "2D"))
kernel = make_kernel(nim, &nkernel, 3, 3, 1);
if (!strcmp(argv[ac], "boxv")) {
ac++;
int vx = atoi(argv[ac]);
kernel = make_kernel(nim, &nkernel, vx, vx, vx);
}
if (!strcmp(argv[ac], "sphere")) {
ac++;
double mm = strtod(argv[ac], &end);
kernel = make_kernel_sphere(nim, &nkernel, mm);
}
#ifndef USING_WASM //WASM does not read files
if (!strcmp(argv[ac], "file")) {
ac++;
kernel = make_kernel_file(nim, &nkernel, argv[ac]);
}
#endif //WASM does not read files
if (!strcmp(argv[ac], "gauss")) {
ac++;
double mm = strtod(argv[ac], &end);
kernel = make_kernel_gauss(nim, &nkernel, mm);
}
if (!strcmp(argv[ac], "box")) { //all voxels in a cube of width <size> mm centered on target voxel");
ac++;
double mm = strtod(argv[ac], &end);
int vx = (2 * floor(mm / nim->dx)) + 1;
int vy = (2 * floor(mm / nim->dy)) + 1;
int vz = (2 * floor(mm / nim->dz)) + 1;
kernel = make_kernel(nim, &nkernel, vx, vy, vz);
}
if (!strcmp(argv[ac], "boxv3")) {
ac++;
int vx = atoi(argv[ac]);
ac++;
int vy = atoi(argv[ac]);
ac++;
int vz = atoi(argv[ac]);
kernel = make_kernel(nim, &nkernel, vx, vy, vz);
}
if (kernel == NULL) {
printfx("Error: '-kernel' option failed.\n"); //e.g. volume size might differ
ok = 1;
}
}
#ifndef USING_WASM //WASM does not handle tensors or file reads
else if (!strcmp(argv[ac], "-tensor_2lower")) {
ok = nifti_tensor_2(nim, 0);
} else if (!strcmp(argv[ac], "-tensor_2upper")) {
ok = nifti_tensor_2(nim, 1);
} else if (!strcmp(argv[ac], "-tensor_decomp")) {
ok = nifti_tensor_decomp(nim, 1);
} else if (!strcmp(argv[ac], "-tensor_decomp_lower")) {
ok = nifti_tensor_decomp(nim, 0);
} else if (!strcmp(argv[ac], "-save")) {
ac++;
char *fout2 = argv[ac];
if (nifti_set_filenames(nim, fout2, 1, 1))
ok = 1;
else {
nifti_save(nim, ""); //nifti_image_write( nim );
nifti_set_filenames(nim, fout, 1, 1);
}
}
else if (!strcmp(argv[ac], "-restart")) {
if (kernel != NULL)
printfx("Warning: 'restart' resets the kernel\n"); //e.g. volume size might differ
nifti_image_free(nim);
if (kernel != NULL)
_mm_free(kernel);
kernel = NULL;
ac++;
nim = nifti_image_read(argv[ac], 1);
if (!nim)
ok = 1; //error
else
kernel = make_kernel(nim, &nkernel, 3, 3, 3); //rebuild kernel AFTER reading the new image (was built from the freed nim)
}
#endif //WASM does not handle tensors or file reads
else if (!strcmp(argv[ac], "-grid")) {
ac++;
double v = strtod(argv[ac], &end);
ac++;
int s = atoi(argv[ac]);
ok = nifti_grid(nim, v, s);
} else if (strstr(argv[ac], "-dog")) {
int orient = 0;
if (strstr(argv[ac], "-dogx")) orient = 1;
if (strstr(argv[ac], "-dogy")) orient = 2;
if (strstr(argv[ac], "-dogz")) orient = 3;
if (strstr(argv[ac], "-dogr")) orient = -1;
ac++;
double pos = strtod(argv[ac], &end);
ac++;
double neg = strtod(argv[ac], &end);
ok = nifti_dog(nim, pos, neg, orient);
} else if (!strcmp(argv[ac], "-tfce")) {
ac++;
double H = strtod(argv[ac], &end);
ac++;
double E = strtod(argv[ac], &end);
ac++;
int c = atoi(argv[ac]);
ok = nifti_tfce(nim, H, E, c);
}
#ifndef USING_WASM //WASM does not support tfceS
else if (!strcmp(argv[ac], "-tfceS")) {
ac++;
double H = strtod(argv[ac], &end);
ac++;
double E = strtod(argv[ac], &end);
ac++;
int c = atoi(argv[ac]);
ac++;
int x = atoi(argv[ac]);
ac++;
int y = atoi(argv[ac]);
ac++;
int z = atoi(argv[ac]);
ac++;
double tfce_thresh = strtod(argv[ac], &end);
ok = nifti_tfceS(nim, H, E, c, x, y, z, tfce_thresh);
}
#endif //WASM does not support tfceS
else if (op == unknown) {
printfx("!!Error: unsupported operation '%s'\n", argv[ac]);
goto fail;
}
if ((op >= dilMk) && (op <= fmeanuk))
ok = nifti_kernel(nim, op, kernel, nkernel);
if ((op >= exp1) && (op <= ptoz1))
nifti_unary(nim, op);
if ((op >= add) && (op < exp1)) { //binary operations
ac++;
double v = strtod(argv[ac], &end);
if (strlen(argv[ac]) != (size_t)(end - argv[ac])) { //e.g. strtod("4d") parses "4" but leaves "d": treat the argument as a filename, not a number
if ((op == power) || (op == clamp) || (op == uclamp) || (op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP) || (op == seed)) {
printfx("Error: '%s' expects numeric value\n", argv[ac - 1]);
goto fail;
} else
#ifdef USING_WASM
ok = 123; //WASM does not read files
#else
ok = nifti_binary(nim, argv[ac], op);
#endif
} else {
if (op == add)
ok = nifti_rescale(nim, 1.0, v);
if (op == sub)
ok = nifti_rescale(nim, 1.0, -v);
if (op == mul)
ok = nifti_rescale(nim, v, 0.0);
if (op == divX)
ok = nifti_rescale(nim, 1.0 / v, 0.0);
if (op == mod)
ok = nifti_rem(nim, v, 1);
if (op == rem)
ok = nifti_rem(nim, v, 0);
if (op == mas) {
printfx("Error: -mas expects image not number\n");
goto fail;
}
if (op == power)
ok = nifti_binary_power(nim, v);
if (op == thr)
ok = nifti_thr(nim, v, 0, 0.0);
if ((op == clamp) || (op == uclamp) || (op == thrp) || (op == thrP) || (op == uthrp) || (op == uthrP))
ok = nifti_thrp(nim, v, op);
if (op == uthr)
ok = nifti_thr(nim, v, 1, 0.0);
if (op == max)
ok = nifti_max(nim, v, 0);
if (op == min)
ok = nifti_max(nim, v, 1);
if (op == inm)
ok = nifti_inm(nim, v);
if (op == ing)
ok = nifti_ing(nim, v);
if (op == smth)
ok = nifti_smooth_gauss(nim, v, v, v, -6.0);
if (op == seed) {
if ((v > 0) && (v < 1))
v *= RAND_MAX;
srand((unsigned)fabs(v));
}
}
} //binary operations
if (ok != 0) { //free resources before propagating the error (previously leaked)
#ifndef USING_WASM
nifti_image_free(nim);
#endif
if (kernel != NULL)
_mm_free(kernel);
return ok;
}
ac++;
}
#ifndef USING_WASM
//convert data to output type (-odt)
if (nifti_image_change_datatype(nim, dtOut, &ihdr) != 0)
return 1;
// if we get here, write the output dataset
nifti_save(nim, ""); //nifti_image_write( nim );
// and clean up memory
nifti_image_free(nim);
#endif
if (kernel != NULL)
_mm_free(kernel);
return 0;
fail:
#ifndef USING_WASM
nifti_image_free(nim);
#endif
if (kernel != NULL)
_mm_free(kernel);
return 1;
}
//All code below for WASM reading
#ifdef USING_WASM
static char* splitArgv(char **str, char **word){
const char QUOTE = '\'';
bool inquotes = false;
// optimization
if( **str == 0 )
return NULL;
// Skip leading spaces.
while (**str && isspace(**str))
(*str)++;
if( **str == '\0')
return NULL;
// Phrase in quotes is one arg
if( **str == QUOTE ){
(*str)++;
inquotes = true;
}
// Set phrase begining
*word = *str;
// Skip all chars if in quotes
if( inquotes ){
while( **str && **str!=QUOTE )
(*str)++;
//if( **str!= QUOTE )
}else{
// Skip non-space characters.
while( **str && !isspace(**str) )
(*str)++;
}
// Null terminate the phrase and set `str` pointer to next symbol
if(**str)
*(*str)++ = '\0';
return *str;
}
char* parseStrToArgcArgvInsitu( char *str, const int argc_MAX, int *argc, char* argv[] ) {
*argc = 0;
while( *argc<argc_MAX-1 && splitArgv(&str, &argv[*argc]) ){
++(*argc);
if( *str == '\0' )
break;
}
argv[*argc] = NULL;
return str;
}
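// In-situ tokenizer sketch (hypothetical buffer; the input must be writable
// because tokens are null-terminated in place):
#if 0
static void demo_parse(void) {
char buf[] = "-add 1 '-kernel boxv 3'"; // quoted phrase stays a single argv entry
char *argv2[8];
int argc2 = 0;
parseStrToArgcArgvInsitu(buf, 8, &argc2, argv2); // argc2 == 3: "-add", "1", "-kernel boxv 3"
}
#endif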
float clampf(float d, float min, float max) {
float t = d < min ? min : d;
return t > max ? max : t;
}
__attribute__((used)) int niimath (void *img, int datatype, int nx, int ny, int nz, int nt, float dx, float dy, float dz, float dt, char * cmdstr){
int nvox = nx * ny * nz * MAX(nt, 1);
if (nvox < 1) return 101;
#define argc_MAX 128
char* argv[argc_MAX] = {0};
int argc=0;
char* rest = parseStrToArgcArgvInsitu(cmdstr,argc_MAX,&argc,argv);
if( *rest!='\0' )
return 1;
nifti_image nim;
nim.data = img;
nim.nvox = nvox;
nim.nx = nx;
nim.ny = ny;
nim.nz = nz;
nim.nt = nt;
nim.dx = dx;
nim.dy = dy;
nim.dz = dz;
nim.dt = dt;
nim.scl_slope = 1.0;
nim.scl_inter = 0.0;
nim.cal_max = 1.0;
nim.cal_min = 0.0;
nim.datatype = DT_FLOAT;
if (datatype == DT_FLOAT)
return mainWASM(&nim, argc, argv);//niimath_core(&nim, cmdstr);
if (datatype == DT_SIGNED_SHORT) {
int16_t *img16 = (int16_t *)img;
float *img32 = (float *) _mm_malloc(nvox * sizeof(float), 64);
for (int i = 0; i < nvox; ++i)
img32[i] = img16[i];
nim.data = img32;
int ret = mainWASM(&nim, argc, argv);
for (int i = 0; i < nvox; ++i)
img16[i] = clampf(img32[i], -32768.0, 32767.0);//img32[i];
_mm_free(img32);
return ret;
}
if (datatype == DT_UNSIGNED_CHAR) {
uint8_t *img8 = (uint8_t *)img;
float *img32 = (float *) _mm_malloc(nvox * sizeof(float), 64);
for (int i = 0; i < nvox; ++i)
img32[i] = img8[i];
nim.data = img32;
int ret = mainWASM(&nim, argc, argv);
for (int i = 0; i < nvox; ++i)
img8[i] = clampf(img32[i], 0.0, 255.0); //img32[i];
_mm_free(img32);
return ret;
}
return 88;
}
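// Usage sketch for the exported entry point (illustrative values; assumes a
// USING_WASM build and the standard NIfTI datatype constants):
#if 0
static int demo_niimath(void) {
float img[8] = {0, 1, 2, 3, 4, 5, 6, 7}; // a 2x2x2 single-volume image
char cmd[] = "-add 1 -mul 2"; // parsed in place by parseStrToArgcArgvInsitu
return niimath(img, DT_FLOAT, 2, 2, 2, 1, 1.0f, 1.0f, 1.0f, 1.0f, cmd);
}
#endif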
#endif //WASM code
|
isogeometric_posteriori_estimator.h | //
// Project Name: Kratos
// Last Modified by: $Author: hbui $
// Date: $Date: 28 May 2015 $
// Revision: $Revision: 1.0 $
//
//
#if !defined(KRATOS_ISOGEOMETRIC_POSTERIORI_ESTIMATOR_H_INCLUDED )
#define KRATOS_ISOGEOMETRIC_POSTERIORI_ESTIMATOR_H_INCLUDED
// System includes
#include <string>
#include <vector>
#include <iostream>
// External includes
#include <omp.h>
#include "boost/progress.hpp"
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "utilities/math_utils.h"
#include "spaces/ublas_space.h"
#include "linear_solvers/linear_solver.h"
#include "isogeometric_application/custom_geometries/isogeometric_geometry.h"
namespace Kratos
{
///@addtogroup ApplicationNameApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
extern Variable<bool> HAS_STRAIN_AT_NODE;
extern Variable<bool> HAS_STRESSES_AT_NODE;
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/*** Detail class definition.
*/
class IsogeometricPosterioriEstimator
{
public:
///@name Type Definitions
///@{
typedef typename ModelPart::NodesContainerType NodesArrayType;
typedef typename ModelPart::ElementsContainerType ElementsArrayType;
typedef typename ModelPart::ConditionsContainerType ConditionsArrayType;
typedef typename Element::GeometryType GeometryType;
typedef IsogeometricGeometry<GeometryType::PointType> IsogeometricGeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
typedef UblasSpace<double, CompressedMatrix, Vector> SerialSparseSpaceType;
typedef UblasSpace<double, Matrix, Vector> SerialDenseSpaceType;
typedef LinearSolver<SerialSparseSpaceType, SerialDenseSpaceType> LinearSolverType;
/// Pointer definition of IsogeometricPosterioriEstimator
KRATOS_CLASS_POINTER_DEFINITION(IsogeometricPosterioriEstimator);
///@}
///@name Life Cycle
///@{
/// Default constructor.
IsogeometricPosterioriEstimator()
{
}
/// Destructor.
virtual ~IsogeometricPosterioriEstimator()
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/// Simple a posteriori error estimator based on nodal stress & strain.
/// Reference: "Matlab Implementation of the Finite Element Method in Elasticity", Alberty et al.
double ComputeSimplePosterioriError(ModelPart& r_model_part);
void ComputeSimplePosterioriErrorOnNodes(const Variable<double>& rThisVariable,
ModelPart& r_model_part,
LinearSolverType::Pointer pSolver);
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
std::stringstream buffer;
buffer << "A collection of posteriori estimators for isogeometric method";
return buffer.str();
}
/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
rOStream << Info();
}
/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/// Calculate vector variable (Stress, Strain) at a local point
Vector& CalculateOnPoint(const Variable<Vector>& rVariable,
Vector& rResult,
Element::Pointer& pElement,
const CoordinatesArrayType& rCoordinates)
{
Vector N;
pElement->GetGeometry().ShapeFunctionsValues(N, rCoordinates);
for(unsigned int i = 0; i < pElement->GetGeometry().size(); ++i)
{
Vector& NodalValues = pElement->GetGeometry()[i].GetSolutionStepValue(rVariable);
if(i == 0)
{
if(rResult.size() != NodalValues.size())
rResult.resize(NodalValues.size());
noalias(rResult) = N( i ) * NodalValues;
}
else
{
noalias(rResult) += N( i ) * NodalValues;
}
}
return rResult;
}
//**********AUXILIARY FUNCTION**************************************************************
//******************************************************************************************
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
std::vector<std::size_t>::iterator i = v.begin();
std::vector<std::size_t>::iterator endit = v.end();
while ( i != endit && (*i) != candidate)
{
++i;
}
if( i == endit )
{
v.push_back(candidate);
}
}
//**********AUXILIARY FUNCTION**************************************************************
//******************************************************************************************
void ConstructMatrixStructure (
SerialSparseSpaceType::MatrixType& A,
ElementsArrayType& rElements,
std::map<unsigned int, unsigned int> MapNodeIdToVec,
ProcessInfo& CurrentProcessInfo
)
{
std::size_t equation_size = A.size1();
std::vector<std::vector<std::size_t> > indices(equation_size);
Element::EquationIdVectorType ids;
for(typename ElementsArrayType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; ++i_element)
{
ids.resize((i_element)->GetGeometry().size());
for(unsigned int i = 0; i < (i_element)->GetGeometry().size(); ++i)
ids[i] = MapNodeIdToVec[(i_element)->GetGeometry()[i].Id()];
for(std::size_t i = 0 ; i < ids.size() ; ++i)
{
if(ids[i] < equation_size)
{
std::vector<std::size_t>& row_indices = indices[ids[i]];
for(std::size_t j = 0 ; j < ids.size() ; ++j)
{
if(ids[j] < equation_size)
AddUnique(row_indices, ids[j]);
}
}
}
}
//allocating the memory needed
int data_size = 0;
for(std::size_t i = 0 ; i < indices.size() ; ++i)
{
data_size += indices[i].size();
}
A.reserve(data_size, false);
//filling with zero the matrix (creating the structure)
#ifndef _OPENMP
for(std::size_t i = 0 ; i < indices.size() ; i++)
{
std::vector<std::size_t>& row_indices = indices[i];
std::sort(row_indices.begin(), row_indices.end());
for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++)
{
A.push_back(i, *it, 0.00);
}
row_indices.clear();
}
#else
int number_of_threads = omp_get_max_threads();
vector<unsigned int> matrix_partition;
CreatePartition(number_of_threads, indices.size(), matrix_partition);
for( int k=0; k < number_of_threads; ++k )
{
#pragma omp parallel
if( omp_get_thread_num() == k )
{
for( std::size_t i = matrix_partition[k]; i < matrix_partition[k+1]; i++ )
{
std::vector<std::size_t>& row_indices = indices[i];
std::sort(row_indices.begin(), row_indices.end());
for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++)
{
A.push_back(i, *it, 0.00);
}
row_indices.clear();
}
}
}
#endif
}
//**********AUXILIARY FUNCTION**************************************************************
//******************************************************************************************
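// Splits rows [0, number_of_rows) into number_of_threads contiguous chunks;
// because partition_size uses integer division, any remainder rows are
// absorbed by the last chunk, whose upper bound is pinned to number_of_rows.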
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions)
{
partitions.resize(number_of_threads + 1);
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for(unsigned int i = 1; i < number_of_threads; ++i)
partitions[i] = partitions[i-1] + partition_size ;
}
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
IsogeometricPosterioriEstimator& operator=(IsogeometricPosterioriEstimator const& rOther)
{
return *this;
}
/// Copy constructor.
IsogeometricPosterioriEstimator(IsogeometricPosterioriEstimator const& rOther)
{
}
///@}
}; // Class IsogeometricPosterioriEstimator
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
inline std::istream& operator >>(std::istream& rIStream, IsogeometricPosterioriEstimator& rThis)
{
return rIStream;
}
/// output stream function
inline std::ostream& operator <<(std::ostream& rOStream,
const IsogeometricPosterioriEstimator& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
}// namespace Kratos.
#undef DEBUG_LEVEL1
#undef DEBUG_LEVEL2
#undef DEBUG_MULTISOLVE
#undef DEBUG_GENERATE_MESH
#undef ENABLE_PROFILING
#endif
|
LAGraph_grread.c | //------------------------------------------------------------------------------
// LAGraph_grread: read a matrix from a binary file
//------------------------------------------------------------------------------
// LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved.
// SPDX-License-Identifier: BSD-2-Clause
//
// See additional acknowledgments in the LICENSE file,
// or contact permission@sei.cmu.edu for the full terms.
//------------------------------------------------------------------------------
// FIXME: this is not yet included in the test coverage suite
// LAGraph_grread: read a matrix from a binary file.
// Contributed by Tim Davis, Texas A&M, based on the Galois graph reader
// file format.
// The file format consists of a header, with the following content:
// uint64_t version : either 1 or 2. 1: nodes are 2^32, 2: nodes are
// 64 bit. This value is returned to the caller, but is otherwise
// unused.
// uint64_t esize : the size of the edge weight, as sizeof (edgetype).
// For example, if the file contains edge weights of type int32_t,
// esize is sizeof (int32_t) == 4. The caller must specify the
// corresponding GrB_Type, and its size must match esize.
// uint64_t n : the number of nodes in the graph. The GrB_Matrix is
// n-by-n. Rectangular matrices are not supported by this format.
// uint64_t e : the number of edges in the graph
// This header is followed by a matrix in CSR format:
// Gp : an array of size ((n+1) * sizeof (uint64_t)) bytes, but Gp [0] = 0
// does not appear in the file. This section of the file is thus
// (n * sizeof (uint64_t)) bytes in length.
// Gj : the adjacency lists. For version 1 the indices are 32 bit
// (e * sizeof (int32_t)), limiting graphs to n < 2^32; for version 2
// they are 64 bit (e * sizeof (uint64_t)), matching the reader below.
// Gx : an array of size (e * esize), containing the edge weights.
// LAGraph_grread returns its status: GrB_SUCCESS if successful,
// GrB_OUT_OF_MEMORY if out of memory, GrB_INVALID_VALUE if a file I/O error
// occurs or the edge size is not what was expected.
#include <LAGraph.h>
#include <LAGraphX.h>
//------------------------------------------------------------------------------
// gr_header
//------------------------------------------------------------------------------
// The gr_header specifies the first 4 * sizeof(uint64_t) bytes of the file.
typedef struct
{
uint64_t version ; // either 1 or 2.
// 1: node id's are in the range 0 to 2^32
// 2: node id's are in the range 0 to 2^64
uint64_t esize ; // sizeof (edgetype)
uint64_t n ; // # of nodes in the graph
uint64_t e ; // # of edges in the graph
}
gr_header ;
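// Example of producing a file in this format (a sketch for illustration only;
// the filename, the int32_t weight type, and the Gp/Gj/Gx arrays are
// assumptions, not part of LAGraph):
//
//      gr_header h = { 2, sizeof (int32_t), n, e } ;  // version 2: 64-bit ids
//      FILE *f = fopen ("graph.gr", "wb") ;
//      fwrite (&h, sizeof (gr_header), 1, f) ;        // the 32-byte header
//      fwrite (Gp + 1, sizeof (uint64_t), n, f) ;     // pointers; Gp [0] = 0 omitted
//      fwrite (Gj, sizeof (uint64_t), e, f) ;         // adjacency lists
//      fwrite (Gx, sizeof (int32_t), e, f) ;          // edge weights
//      fclose (f) ;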
//------------------------------------------------------------------------------
// LAGraph_binary_read
//------------------------------------------------------------------------------
// Read a block of binary data from a file. Returns GrB_SUCCESS if successful,
// GrB_INVALID_VALUE otherwise.
static GrB_Info LAGraph_binary_read
(
char *name, // name of array being read in
FILE *fp, // file to read from
void *buffer, // buffer of size nbytes to read into
size_t n, // # of elements to read
size_t size // size of each element
)
{
if (fp == NULL)
{
fprintf (stderr, "LAGraph_grread: file I/O error\n") ;
return (GrB_INVALID_VALUE) ;
}
size_t n_read = fread (buffer, size, n, fp) ;
if (n_read != n)
{
fprintf (stderr, "LAGraph_grread: file I/O error; expected %g items"
", got %g, object %s, size %g\n", (double) n, (double) n_read,
name, (double) size) ;
return (GrB_INVALID_VALUE) ;
}
return (GrB_SUCCESS) ;
}
//------------------------------------------------------------------------------
// LAGraph_FREE_ALL
//------------------------------------------------------------------------------
// Free all allocated space; used only for error return.
#define LAGraph_FREE_ALL \
{ \
GrB_free (G) ; \
LAGraph_Free ((void**)&Gp) ; \
LAGraph_Free ((void**)&Gj) ; \
LAGraph_Free ((void**)&Gj_32) ; \
LAGraph_Free ((void**)&Gx) ; \
if (fp != NULL) fclose (fp) ; \
fp = NULL ; \
}
//------------------------------------------------------------------------------
// LAGraph_grread
//------------------------------------------------------------------------------
GrB_Info LAGraph_grread // read a matrix from a binary file
(
GrB_Matrix *G, // handle of matrix to create
uint64_t *G_version, // the version in the file
const char *filename, // name of file to open
GrB_Type gtype // type of matrix to read, NULL if no edge weights
// (in that case, G has type GrB_BOOL with all
// edge weights equal to 1).
)
{
#if !defined(LG_SUITESPARSE)
return GrB_PANIC;
#else
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
GrB_Info info ;
GrB_Index *Gp = NULL ;
int32_t *Gj_32 = NULL ;
GrB_Index *Gj = NULL ;
void *Gx = NULL ;
FILE *fp = NULL ;
if (G == NULL || G_version == NULL || filename == NULL)
{
LAGRAPH_ERROR ("invalid input arguments", GrB_NULL_POINTER) ;
}
(*G) = NULL ;
(*G_version) = 0 ;
//--------------------------------------------------------------------------
// open the file
//--------------------------------------------------------------------------
fp = fopen (filename, "r") ;
if (fp == NULL)
{
fprintf (stderr, "LAGraph_grread: file not found: %s\n", filename) ;
LAGRAPH_ERROR ("input file not found", GrB_INVALID_VALUE) ;
}
//--------------------------------------------------------------------------
// open the file and read the gr_header
//--------------------------------------------------------------------------
gr_header header ;
LAGRAPH_OK (LAGraph_binary_read ("header",
fp, &header, 1, sizeof (gr_header))) ;
uint64_t version = header.version ; // version, 1 or 2
uint64_t esize = header.esize ; // sizeof (edge type)
uint64_t n = header.n ; // # of nodes
uint64_t e = header.e ; // # of edges
(*G_version) = version ;
size_t esize_expected = 0 ;
if (gtype != NULL)
{
LAGRAPH_OK (GxB_Type_size (&esize_expected, gtype)) ;
}
if (esize != esize_expected)
{
fprintf (stderr, "LAGraph_grread: esize in file (%g) does not match"
" gtype size (%g)\n", (double) esize, (double) esize_expected) ;
LAGRAPH_ERROR ("unexpected edge size", GrB_INVALID_VALUE) ;
}
if (! (version == 1 || version == 2))
{
LAGRAPH_ERROR ("invalid version, must be 1 or 2", GrB_INVALID_VALUE) ;
}
if (version == 1 && n > UINT32_MAX)
{
LAGRAPH_ERROR ("problem too large", GrB_INVALID_VALUE) ;
}
//--------------------------------------------------------------------------
// allocate and read in the pointers
//--------------------------------------------------------------------------
Gp = LAGraph_Malloc (n+1, sizeof (GrB_Index)) ;
if (Gp == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
Gp [0] = 0 ;
LAGRAPH_OK (LAGraph_binary_read ("pointers",
fp, Gp+1, n, sizeof (GrB_Index))) ;
//--------------------------------------------------------------------------
// allocate and read in the indices
//--------------------------------------------------------------------------
Gj = LAGraph_Malloc (e, sizeof (GrB_Index)) ;
if (Gj == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
if (version == 1)
{
//----------------------------------------------------------------------
// indices are in 32-bit format in the file
//----------------------------------------------------------------------
// allocate workspace for a single chunk
#define CHUNK (10 * 1024 * 1024)
int64_t chunk = LAGraph_MIN (CHUNK, e) ;
Gj_32 = LAGraph_Malloc (chunk, sizeof (int32_t)) ;
if (Gj_32 == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
// read in the indices one chunk at a time
for (int64_t k = 0 ; k < e ; k += CHUNK)
{
// read in the next chunk
int64_t chunk = LAGraph_MIN (CHUNK, e-k) ;
LAGRAPH_OK (LAGraph_binary_read ("indices",
fp, Gj_32, chunk, sizeof (int32_t))) ;
// convert the chunk to 64-bit
#pragma omp parallel for schedule(static)
for (GrB_Index p = 0 ; p < chunk ; p++)
{
Gj [k + p] = (GrB_Index) Gj_32 [p] ;
}
}
LAGraph_Free ((void**)&Gj_32) ; Gj_32 = NULL;
}
else
{
//----------------------------------------------------------------------
// indices are in 64-bit format in the file
//----------------------------------------------------------------------
LAGRAPH_OK (LAGraph_binary_read ("indices",
fp, Gj, e, sizeof (GrB_Index))) ;
}
//--------------------------------------------------------------------------
// read in the values
//--------------------------------------------------------------------------
bool no_edge_weights = (gtype == NULL) ;
if (no_edge_weights)
{
// the input file has no edge weights
gtype = GrB_BOOL ;
esize = sizeof (bool) ;
}
Gx = LAGraph_Malloc (e, esize) ;
if (Gx == NULL) LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
if (no_edge_weights)
{
// set all edge weights to boolean true
bool *Gbool = (bool *) Gx ;
#pragma omp parallel for schedule(static)
for (GrB_Index p = 0 ; p < e ; p++)
{
Gbool [p] = true ;
}
}
else
{
// read in the edge weights
LAGRAPH_OK (LAGraph_binary_read ("edgeweights", fp, Gx, e, esize)) ;
}
//--------------------------------------------------------------------------
// import the data into the GrB_Matrix
//--------------------------------------------------------------------------
#if GxB_IMPLEMENTATION < GxB_VERSION (5,0,0)
#error "SuiteSparse v5.0.0 or later required"
#endif
GrB_Index Gp_size = (n+1) * sizeof (GrB_Index) ;
GrB_Index Gj_size = (e) * sizeof (GrB_Index) ;
GrB_Index Gx_size = (e) * esize ;
LAGRAPH_OK (GxB_Matrix_import_CSR (G, gtype, n, n,
&Gp, &Gj, &Gx, Gp_size, Gj_size, Gx_size, false, false, NULL)) ;
//--------------------------------------------------------------------------
// close the file and return result
//--------------------------------------------------------------------------
fclose (fp) ;
return (GrB_SUCCESS) ;
#endif
}
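//------------------------------------------------------------------------------
// usage sketch
//------------------------------------------------------------------------------
// Illustrative only (the filename is hypothetical; GraphBLAS must already be
// initialized by the caller):
//
//      GrB_Matrix G = NULL ;
//      uint64_t version = 0 ;
//      GrB_Info info = LAGraph_grread (&G, &version, "graph.gr", GrB_INT32) ;
//      if (info == GrB_SUCCESS) GrB_free (&G) ;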
|
hadvuv5th.h | #ifndef HADVUV5TH_H
#define HADVUV5TH_H
ElementType advectionDriver(const Storage3D& field, const int64_t i, const int64_t j, const int64_t k, // by reference: avoid copying the field per grid point
const ElementType uavg, const ElementType vavg, const ElementType eddlat,
const ElementType eddlon) {
ElementType result_x = 0.0;
ElementType result_y = 0.0;
if (uavg > 0) {
result_x = uavg * (ElementType(1.0) / ElementType(30.0) * field(i - 3, j, k) +
ElementType(-1.0) / ElementType(4.0) * field(i - 2, j, k) + field(i - 1, j, k) +
ElementType(-1.0) / ElementType(3.0) * field(i, j, k) +
ElementType(-1.0) / ElementType(2.0) * field(i + 1, j, k) +
ElementType(1.0) / ElementType(20.0) * field(i + 2, j, k));
} else {
result_x = -uavg * (ElementType(1.0) / ElementType(20.0) * field(i - 2, j, k) +
ElementType(-1.0) / ElementType(2.0) * field(i - 1, j, k) +
ElementType(-1.0) / ElementType(3.0) * field(i, j, k) + field(i + 1, j, k) +
ElementType(-1.0) / ElementType(4.0) * field(i + 2, j, k) +
ElementType(1.0) / ElementType(30.0) * field(i + 3, j, k));
}
if (vavg > 0) {
result_y = vavg * (ElementType(1.0) / ElementType(30.0) * field(i, j - 3, k) +
ElementType(-1.0) / ElementType(4.0) * field(i, j - 2, k) + field(i, j - 1, k) +
ElementType(-1.0) / ElementType(3.0) * field(i, j, k) +
ElementType(-1.0) / ElementType(2.0) * field(i, j + 1, k) +
ElementType(1.0) / ElementType(20.0) * field(i, j + 2, k));
} else {
result_y = -vavg * (ElementType(1.0) / ElementType(20.0) * field(i, j - 2, k) +
ElementType(-1.0) / ElementType(2.0) * field(i, j - 1, k) +
ElementType(-1.0) / ElementType(3.0) * field(i, j, k) + field(i, j + 1, k) +
ElementType(-1.0) / ElementType(4.0) * field(i, j + 2, k) +
ElementType(1.0) / ElementType(30.0) * field(i, j + 3, k));
}
return eddlat * result_x + eddlon * result_y;
}
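// Note: both branches apply a fifth-order upwind-biased stencil; the six
// coefficients (1/30, -1/4, 1, -1/3, -1/2, 1/20) sum to zero, so a constant
// field produces no advective tendency, and the stencil mirrors when the
// advecting velocity changes sign.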
void hadvuv5th(Storage3D& uout, Storage3D& vout, const Storage3D& uin, const Storage3D& vin, const Storage1D& acrlat0,
const Storage1D& acrlat1, const Storage1D& tgrlatda0, const Storage1D& tgrlatda1, Storage3D& uatupos,
Storage3D& vatupos, Storage3D& uatvpos, Storage3D& vatvpos, Storage3D& uavg, Storage3D& vavg,
Storage3D& ures, Storage3D& vres, const ElementType eddlat, const ElementType eddlon) {
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
uatupos(i, j, k) = (ElementType(1.0) / ElementType(3.0)) * (uin(i - 1, j, k) + uin(i, j, k) + uin(i + 1, j, k));
vatupos(i, j, k) =
ElementType(0.25) * (vin(i + 1, j, k) + vin(i + 1, j - 1, k) + vin(i, j, k) + vin(i, j - 1, k));
}
}
}
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
uavg(i, j, k) = acrlat0(j) * uatupos(i, j, k);
vavg(i, j, k) = EARTH_RADIUS_RECIP * vatupos(i, j, k);
}
}
}
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
ures(i, j, k) = advectionDriver(uin, i, j, k, uavg(i, j, k), vavg(i, j, k), eddlat, eddlon);
}
}
}
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
uout(i, j, k) = ures(i, j, k) + tgrlatda0(j) * uin(i, j, k) * vatupos(i, j, k);
}
}
}
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
uatvpos(i, j, k) =
ElementType(0.25) * (uin(i - 1, j, k) + uin(i, j, k) + uin(i, j + 1, k) + uin(i - 1, j + 1, k));
vatvpos(i, j, k) = ElementType(1.0) / ElementType(3.0) * (vin(i, j - 1, k) + vin(i, j, k) + vin(i, j + 1, k));
}
}
}
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
uavg(i, j, k) = acrlat1(j) * uatvpos(i, j, k);
vavg(i, j, k) = EARTH_RADIUS_RECIP * vatvpos(i, j, k);
}
}
}
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
vres(i, j, k) = advectionDriver(vin, i, j, k, uavg(i, j, k), vavg(i, j, k), eddlat, eddlon);
}
}
}
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
vout(i, j, k) = vres(i, j, k) - tgrlatda1(j) * uatvpos(i, j, k) * uatvpos(i, j, k);
}
}
}
}
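// The variants below compute the same result with different loop schedules:
// hadvuv5th_fullfusion collapses all eight k/i/j nests above into one (the
// intermediate fields live in scalars instead of arrays, cutting memory
// traffic), hadvuv5th_partialfusion fuses the u-chain and the v-chain
// separately, and hadvuv5th_openmp parallelizes the fully fused nest over k.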
void hadvuv5th_fullfusion(Storage3D& uout, Storage3D& vout, const Storage3D& uin, const Storage3D& vin, const Storage1D& acrlat0,
const Storage1D& acrlat1, const Storage1D& tgrlatda0, const Storage1D& tgrlatda1, Storage3D& uatupos,
Storage3D& vatupos, Storage3D& uatvpos, Storage3D& vatvpos, Storage3D& uavg, Storage3D& vavg,
Storage3D& ures, Storage3D& vres, const ElementType eddlat, const ElementType eddlon) {
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
auto _uatupos = (ElementType(1.0) / ElementType(3.0)) * (uin(i - 1, j, k) + uin(i, j, k) + uin(i + 1, j, k));
auto _vatupos = ElementType(0.25) * (vin(i + 1, j, k) + vin(i + 1, j - 1, k) + vin(i, j, k) + vin(i, j - 1, k));
auto _uavg = acrlat0(j) * _uatupos;
auto _vavg = EARTH_RADIUS_RECIP * _vatupos;
auto _ures = advectionDriver(uin, i, j, k, _uavg, _vavg, eddlat, eddlon);
uout(i, j, k) = _ures + tgrlatda0(j) * uin(i, j, k) * _vatupos;
auto _uatvpos = ElementType(0.25) * (uin(i - 1, j, k) + uin(i, j, k) + uin(i, j + 1, k) + uin(i - 1, j + 1, k));
auto _vatvpos = ElementType(1.0) / ElementType(3.0) * (vin(i, j - 1, k) + vin(i, j, k) + vin(i, j + 1, k));
_uavg = acrlat1(j) * _uatvpos;
_vavg = EARTH_RADIUS_RECIP * _vatvpos;
auto _vres = advectionDriver(vin, i, j, k, _uavg, _vavg, eddlat, eddlon);
vout(i, j, k) = _vres - tgrlatda1(j) * _uatvpos * _uatvpos;
}
}
}
}
void hadvuv5th_partialfusion(Storage3D& uout, Storage3D& vout, const Storage3D& uin, const Storage3D& vin, const Storage1D& acrlat0,
const Storage1D& acrlat1, const Storage1D& tgrlatda0, const Storage1D& tgrlatda1, Storage3D& uatupos,
Storage3D& vatupos, Storage3D& uatvpos, Storage3D& vatvpos, Storage3D& uavg, Storage3D& vavg,
Storage3D& ures, Storage3D& vres, const ElementType eddlat, const ElementType eddlon) {
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
auto _uatupos = (ElementType(1.0) / ElementType(3.0)) * (uin(i - 1, j, k) + uin(i, j, k) + uin(i + 1, j, k));
auto _vatupos = ElementType(0.25) * (vin(i + 1, j, k) + vin(i + 1, j - 1, k) + vin(i, j, k) + vin(i, j - 1, k));
auto _uavg = acrlat0(j) * _uatupos;
auto _vavg = EARTH_RADIUS_RECIP * _vatupos;
auto _ures = advectionDriver(uin, i, j, k, _uavg, _vavg, eddlat, eddlon);
uout(i, j, k) = _ures + tgrlatda0(j) * uin(i, j, k) * _vatupos;
}
}
}
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
auto _uatvpos =
ElementType(0.25) * (uin(i - 1, j, k) + uin(i, j, k) + uin(i, j + 1, k) + uin(i - 1, j + 1, k));
auto _vatvpos = ElementType(1.0) / ElementType(3.0) * (vin(i, j - 1, k) + vin(i, j, k) + vin(i, j + 1, k));
auto _uavg = acrlat1(j) * _uatvpos;
auto _vavg = EARTH_RADIUS_RECIP * _vatvpos;
auto _vres = advectionDriver(vin, i, j, k, _uavg, _vavg, eddlat, eddlon);
vout(i, j, k) = _vres - tgrlatda1(j) * _uatvpos * _uatvpos;
}
}
}
}
void hadvuv5th_openmp(Storage3D& uout, Storage3D& vout, const Storage3D& uin, const Storage3D& vin, const Storage1D& acrlat0,
const Storage1D& acrlat1, const Storage1D& tgrlatda0, const Storage1D& tgrlatda1, Storage3D& uatupos,
Storage3D& vatupos, Storage3D& uatvpos, Storage3D& vatvpos, Storage3D& uavg, Storage3D& vavg,
Storage3D& ures, Storage3D& vres, const ElementType eddlat, const ElementType eddlon) {
#pragma omp parallel for
for (int64_t k = 0; k < domain_height; ++k) {
for (int64_t i = 0; i < domain_size; ++i) {
for (int64_t j = 0; j < domain_size; ++j) {
auto _uatupos = (ElementType(1.0) / ElementType(3.0)) * (uin(i - 1, j, k) + uin(i, j, k) + uin(i + 1, j, k));
auto _vatupos = ElementType(0.25) * (vin(i + 1, j, k) + vin(i + 1, j - 1, k) + vin(i, j, k) + vin(i, j - 1, k));
auto _uavg = acrlat0(j) * _uatupos;
auto _vavg = EARTH_RADIUS_RECIP * _vatupos;
auto _ures = advectionDriver(uin, i, j, k, _uavg, _vavg, eddlat, eddlon);
uout(i, j, k) = _ures + tgrlatda0(j) * uin(i, j, k) * _vatupos;
auto _uatvpos = ElementType(0.25) * (uin(i - 1, j, k) + uin(i, j, k) + uin(i, j + 1, k) + uin(i - 1, j + 1, k));
auto _vatvpos = ElementType(1.0) / ElementType(3.0) * (vin(i, j - 1, k) + vin(i, j, k) + vin(i, j + 1, k));
_uavg = acrlat1(j) * _uatvpos;
_vavg = EARTH_RADIUS_RECIP * _vatvpos;
auto _vres = advectionDriver(vin, i, j, k, _uavg, _vavg, eddlat, eddlon);
vout(i, j, k) = _vres - tgrlatda1(j) * _uatvpos * _uatvpos;
}
}
}
}
#endif // HADVUV5TH_H
|
implicit_blender.c | /*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) Blender Foundation
* All rights reserved.
*/
/** \file
* \ingroup bph
*/
#include "implicit.h"
#ifdef IMPLICIT_SOLVER_BLENDER
# include "MEM_guardedalloc.h"
# include "DNA_scene_types.h"
# include "DNA_object_types.h"
# include "DNA_object_force_types.h"
# include "DNA_meshdata_types.h"
# include "DNA_texture_types.h"
# include "BLI_math.h"
# include "BLI_utildefines.h"
# include "BKE_cloth.h"
# include "BKE_collision.h"
# include "BKE_effect.h"
# include "BPH_mass_spring.h"
# ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wtype-limits"
# endif
# ifdef _OPENMP
# define CLOTH_OPENMP_LIMIT 512
# endif
//#define DEBUG_TIME
# ifdef DEBUG_TIME
# include "PIL_time.h"
# endif
static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};
# if 0
# define C99
# ifdef C99
# define DO_INLINE inline
# else
# define DO_INLINE static
# endif
# endif /* if 0 */
struct Cloth;
//////////////////////////////////////////
/* fast vector / matrix library, enhancements are welcome :) -dg */
/////////////////////////////////////////
/* DEFINITIONS */
typedef float lfVector[3];
typedef struct fmatrix3x3 {
float m[3][3]; /* 3x3 matrix */
unsigned int c, r; /* column and row number */
/* int pinned; // is this vertex allowed to move? */
float n1, n2, n3; /* three normal vectors for collision constraints */
unsigned int vcount; /* vertex count */
unsigned int scount; /* spring count */
} fmatrix3x3;
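/* Convention inferred from the debug printers below (print_sparse_matrix,
 * print_bfmatrix): an fmatrix3x3 array encodes a big sparse matrix as 3x3
 * blocks, the vcount/scount bookkeeping fields being meaningful only on
 * element [0]; entries [0, vcount) hold the block diagonal and the following
 * scount entries the off-diagonal spring blocks. */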
///////////////////////////
// float[3] vector
///////////////////////////
/* simple vector code */
/* STATUS: verified */
DO_INLINE void mul_fvector_S(float to[3], float from[3], float scalar)
{
to[0] = from[0] * scalar;
to[1] = from[1] * scalar;
to[2] = from[2] * scalar;
}
/* simple v^T * v product ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
DO_INLINE void mul_fvectorT_fvector(float to[3][3], float vectorA[3], float vectorB[3])
{
mul_fvector_S(to[0], vectorB, vectorA[0]);
mul_fvector_S(to[1], vectorB, vectorA[1]);
mul_fvector_S(to[2], vectorB, vectorA[2]);
}
/* simple v^T * v product with scalar ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
DO_INLINE void mul_fvectorT_fvectorS(float to[3][3], float vectorA[3], float vectorB[3], float aS)
{
mul_fvectorT_fvector(to, vectorA, vectorB);
mul_fvector_S(to[0], to[0], aS);
mul_fvector_S(to[1], to[1], aS);
mul_fvector_S(to[2], to[2], aS);
}
# if 0
/* printf vector[3] on console: for debug output */
static void print_fvector(float m3[3])
{
printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]);
}
///////////////////////////
// long float vector float (*)[3]
///////////////////////////
/* print long vector on console: for debug output */
DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
print_fvector(fLongVector[i]);
}
}
# endif
/* create long vector */
DO_INLINE lfVector *create_lfvector(unsigned int verts)
{
/* TODO: check if memory allocation was successful */
return (lfVector *)MEM_callocN(verts * sizeof(lfVector), "cloth_implicit_alloc_vector");
// return (lfVector *)cloth_aligned_malloc(&MEMORY_BASE, verts * sizeof(lfVector));
}
/* delete long vector */
DO_INLINE void del_lfvector(float (*fLongVector)[3])
{
if (fLongVector != NULL) {
MEM_freeN(fLongVector);
// cloth_aligned_free(&MEMORY_BASE, fLongVector);
}
}
/* copy long vector */
DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts)
{
memcpy(to, from, verts * sizeof(lfVector));
}
/* init long vector with float[3] */
DO_INLINE void init_lfvector(float (*fLongVector)[3], float vector[3], unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
copy_v3_v3(fLongVector[i], vector);
}
}
/* zero long vector with float[3] */
DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts)
{
memset(to, 0, verts * sizeof(lfVector));
}
/* multiply long vector with scalar*/
DO_INLINE void mul_lfvectorS(float (*to)[3],
float (*fLongVector)[3],
float scalar,
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
mul_fvector_S(to[i], fLongVector[i], scalar);
}
}
/* A -= B * scalar --> for big vector */
DO_INLINE void submul_lfvectorS(float (*to)[3],
float (*fLongVector)[3],
float scalar,
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
VECSUBMUL(to[i], fLongVector[i], scalar);
}
}
/* dot product for big vector */
DO_INLINE float dot_lfvector(float (*fLongVectorA)[3],
float (*fLongVectorB)[3],
unsigned int verts)
{
long i = 0;
float temp = 0.0;
// XXX brecht, disabled this for now (first schedule line was already disabled),
// due to non-commutative nature of floating point ops this makes the sim give
// different results each time you run it!
// schedule(guided, 2)
//#pragma omp parallel for reduction(+: temp) if (verts > CLOTH_OPENMP_LIMIT)
for (i = 0; i < (long)verts; i++) {
temp += dot_v3v3(fLongVectorA[i], fLongVectorB[i]);
}
return temp;
}
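/* A determinism-preserving alternative (sketch, not what the code above does):
 * accumulate fixed-size blocks serially, then sum the per-block partials in a
 * fixed order, so the rounding pattern no longer depends on the thread count. */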
/* A = B + C --> for big vector */
DO_INLINE void add_lfvector_lfvector(float (*to)[3],
float (*fLongVectorA)[3],
float (*fLongVectorB)[3],
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
add_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]);
}
}
/* A = B + C * float --> for big vector */
DO_INLINE void add_lfvector_lfvectorS(float (*to)[3],
float (*fLongVectorA)[3],
float (*fLongVectorB)[3],
float bS,
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
VECADDS(to[i], fLongVectorA[i], fLongVectorB[i], bS);
}
}
/* A = B * float + C * float --> for big vector */
DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3],
float (*fLongVectorA)[3],
float aS,
float (*fLongVectorB)[3],
float bS,
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
VECADDSS(to[i], fLongVectorA[i], aS, fLongVectorB[i], bS);
}
}
/* A = B - C * float --> for big vector */
DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3],
float (*fLongVectorA)[3],
float (*fLongVectorB)[3],
float bS,
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
VECSUBS(to[i], fLongVectorA[i], fLongVectorB[i], bS);
}
}
/* A = B - C --> for big vector */
DO_INLINE void sub_lfvector_lfvector(float (*to)[3],
float (*fLongVectorA)[3],
float (*fLongVectorB)[3],
unsigned int verts)
{
unsigned int i = 0;
for (i = 0; i < verts; i++) {
sub_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]);
}
}
///////////////////////////
// 3x3 matrix
///////////////////////////
# if 0
/* printf 3x3 matrix on console: for debug output */
static void print_fmatrix(float m3[3][3])
{
printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]);
printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]);
printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]);
}
static void print_sparse_matrix(fmatrix3x3 *m)
{
if (m) {
unsigned int i;
for (i = 0; i < m[0].vcount + m[0].scount; i++) {
printf("%d:\n", i);
print_fmatrix(m[i].m);
}
}
}
# endif
# if 0
static void print_lvector(lfVector *v, int numverts)
{
int i;
for (i = 0; i < numverts; ++i) {
if (i > 0) {
printf("\n");
}
printf("%f,\n", v[i][0]);
printf("%f,\n", v[i][1]);
printf("%f,\n", v[i][2]);
}
}
# endif
# if 0
static void print_bfmatrix(fmatrix3x3 *m)
{
int tot = m[0].vcount + m[0].scount;
int size = m[0].vcount * 3;
float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix");
int q, i, j;
for (q = 0; q < tot; ++q) {
int k = 3 * m[q].r;
int l = 3 * m[q].c;
for (j = 0; j < 3; ++j) {
for (i = 0; i < 3; ++i) {
// if (t[k + i + (l + j) * size] != 0.0f) {
// printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c);
// }
if (k == l) {
t[k + i + (k + j) * size] += m[q].m[i][j];
}
else {
t[k + i + (l + j) * size] += m[q].m[i][j];
t[l + j + (k + i) * size] += m[q].m[j][i];
}
}
}
}
for (j = 0; j < size; ++j) {
if (j > 0 && j % 3 == 0) {
printf("\n");
}
for (i = 0; i < size; ++i) {
if (i > 0 && i % 3 == 0) {
printf(" ");
}
implicit_print_matrix_elem(t[i + j * size]);
}
printf("\n");
}
MEM_freeN(t);
}
# endif
/* copy 3x3 matrix */
DO_INLINE void cp_fmatrix(float to[3][3], float from[3][3])
{
// memcpy(to, from, sizeof (float) * 9);
copy_v3_v3(to[0], from[0]);
copy_v3_v3(to[1], from[1]);
copy_v3_v3(to[2], from[2]);
}
/* initialize the diagonal of a 3x3 matrix with a scalar */
DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS)
{
cp_fmatrix(to, ZERO);
to[0][0] = aS;
to[1][1] = aS;
to[2][2] = aS;
}
# if 0
/* calculate determinant of 3x3 matrix */
DO_INLINE float det_fmatrix(float m[3][3])
{
return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] + m[0][1] * m[1][2] * m[2][0] -
m[0][0] * m[1][2] * m[2][1] - m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2];
}
DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3])
{
unsigned int i, j;
float d;
if ((d = det_fmatrix(from)) == 0) {
printf("can't build inverse");
exit(0);
}
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
int i1 = (i + 1) % 3;
int i2 = (i + 2) % 3;
int j1 = (j + 1) % 3;
int j2 = (j + 2) % 3;
/** Reverse indexes i&j to take transpose. */
to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d;
/**
* <pre>
* if (i == j) {
* to[i][j] = 1.0f / from[i][j];
* }
* else {
* to[i][j] = 0;
* }
* </pre>
*/
}
}
}
# endif
/* 3x3 matrix multiplied by a scalar */
/* STATUS: verified */
DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar)
{
mul_fvector_S(matrix[0], matrix[0], scalar);
mul_fvector_S(matrix[1], matrix[1], scalar);
mul_fvector_S(matrix[2], matrix[2], scalar);
}
/* a vector multiplied by a 3x3 matrix */
/* STATUS: verified */
DO_INLINE void mul_fvector_fmatrix(float *to, float *from, float matrix[3][3])
{
to[0] = matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2];
to[1] = matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2];
to[2] = matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2];
}
/* 3x3 matrix multiplied by a vector */
/* STATUS: verified */
DO_INLINE void mul_fmatrix_fvector(float *to, float matrix[3][3], float from[3])
{
to[0] = dot_v3v3(matrix[0], from);
to[1] = dot_v3v3(matrix[1], from);
to[2] = dot_v3v3(matrix[2], from);
}
/* 3x3 matrix addition with 3x3 matrix */
DO_INLINE void add_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3])
{
add_v3_v3v3(to[0], matrixA[0], matrixB[0]);
add_v3_v3v3(to[1], matrixA[1], matrixB[1]);
add_v3_v3v3(to[2], matrixA[2], matrixB[2]);
}
/* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */
DO_INLINE void subadd_fmatrixS_fmatrixS(
float to[3][3], float matrixA[3][3], float aS, float matrixB[3][3], float bS)
{
VECSUBADDSS(to[0], matrixA[0], aS, matrixB[0], bS);
VECSUBADDSS(to[1], matrixA[1], aS, matrixB[1], bS);
VECSUBADDSS(to[2], matrixA[2], aS, matrixB[2], bS);
}
/* A = B - C (3x3 matrix subtraction with 3x3 matrix) */
DO_INLINE void sub_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3])
{
sub_v3_v3v3(to[0], matrixA[0], matrixB[0]);
sub_v3_v3v3(to[1], matrixA[1], matrixB[1]);
sub_v3_v3v3(to[2], matrixA[2], matrixB[2]);
}
/////////////////////////////////////////////////////////////////
// special functions
/////////////////////////////////////////////////////////////////
/* 3x3 matrix multiplied+added by a vector */
/* STATUS: verified */
DO_INLINE void muladd_fmatrix_fvector(float to[3], float matrix[3][3], float from[3])
{
to[0] += dot_v3v3(matrix[0], from);
to[1] += dot_v3v3(matrix[1], from);
to[2] += dot_v3v3(matrix[2], from);
}
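/* transposed 3x3 matrix multiplied+added by a vector (to += matrix^T * from) */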
DO_INLINE void muladd_fmatrixT_fvector(float to[3], float matrix[3][3], float from[3])
{
to[0] += matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2];
to[1] += matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2];
to[2] += matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2];
}
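/* outer product of two vectors: each output vector r[i] is a scaled by b[i] */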
BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3])
{
mul_v3_v3fl(r[0], a, b[0]);
mul_v3_v3fl(r[1], a, b[1]);
mul_v3_v3fl(r[2], a, b[2]);
}
BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], float m[3][3])
{
cross_v3_v3v3(r[0], v, m[0]);
cross_v3_v3v3(r[1], v, m[1]);
cross_v3_v3v3(r[2], v, m[2]);
}
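/* build the skew-symmetric matrix form of the cross product with v */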
BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3])
{
r[0][0] = 0.0f;
r[1][0] = v[2];
r[2][0] = -v[1];
r[0][1] = -v[2];
r[1][1] = 0.0f;
r[2][1] = v[0];
r[0][2] = v[1];
r[1][2] = -v[0];
r[2][2] = 0.0f;
}
BLI_INLINE void madd_m3_m3fl(float r[3][3], float m[3][3], float f)
{
r[0][0] += m[0][0] * f;
r[0][1] += m[0][1] * f;
r[0][2] += m[0][2] * f;
r[1][0] += m[1][0] * f;
r[1][1] += m[1][1] * f;
r[1][2] += m[1][2] * f;
r[2][0] += m[2][0] * f;
r[2][1] += m[2][1] * f;
r[2][2] += m[2][2] * f;
}
/////////////////////////////////////////////////////////////////
///////////////////////////
// SPARSE SYMMETRIC big matrix with 3x3 matrix entries
///////////////////////////
/* printf a big matrix on console: for debug output */
# if 0
static void print_bfmatrix(fmatrix3x3 *m3)
{
unsigned int i = 0;
for (i = 0; i < m3[0].vcount + m3[0].scount; i++) {
print_fmatrix(m3[i].m);
}
}
# endif
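/* set the block-row/block-column position of a sparse matrix entry */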
BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c)
{
matrix->r = r;
matrix->c = c;
}
/* create big matrix */
DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs)
{
/* TODO: check if memory allocation was successful */
fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs),
"cloth_implicit_alloc_matrix");
int i;
temp[0].vcount = verts;
temp[0].scount = springs;
/* vertex part of the matrix is diagonal blocks */
for (i = 0; i < verts; ++i) {
init_fmatrix(temp + i, i, i);
}
return temp;
}
/* delete big matrix */
DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix)
{
if (matrix != NULL) {
MEM_freeN(matrix);
}
}
/* copy big matrix */
DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from)
{
// TODO bounds checking
memcpy(to, from, sizeof(fmatrix3x3) * (from[0].vcount + from[0].scount));
}
/* init big matrix */
// slow in parallel
DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3])
{
unsigned int i;
for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) {
cp_fmatrix(matrix[i].m, m3);
}
}
/* init the diagonal of big matrix */
// slow in parallel
DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3])
{
unsigned int i, j;
float tmatrix[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};
for (i = 0; i < matrix[0].vcount; i++) {
cp_fmatrix(matrix[i].m, m3);
}
for (j = matrix[0].vcount; j < matrix[0].vcount + matrix[0].scount; j++) {
cp_fmatrix(matrix[j].m, tmatrix);
}
}
/* SPARSE SYMMETRIC multiply big matrix with long vector*/
/* STATUS: verified */
DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector)
{
unsigned int vcount = from[0].vcount;
lfVector *temp = create_lfvector(vcount);
zero_lfvector(to, vcount);
# pragma omp parallel sections if (vcount > CLOTH_OPENMP_LIMIT)
{
# pragma omp section
{
for (unsigned int i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) {
/* This is the lower triangle of the sparse matrix,
* therefore multiplication occurs with transposed submatrices. */
muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]);
}
}
# pragma omp section
{
for (unsigned int i = 0; i < from[0].vcount + from[0].scount; i++) {
muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]);
}
}
}
add_lfvector_lfvector(to, to, temp, from[0].vcount);
del_lfvector(temp);
}
/* SPARSE SYMMETRIC sub big matrix with big matrix*/
/* A -= B * float + C * float --> for big matrix */
/* VERIFIED */
DO_INLINE void subadd_bfmatrixS_bfmatrixS(
fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS)
{
unsigned int i = 0;
/* process diagonal elements */
for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) {
subadd_fmatrixS_fmatrixS(to[i].m, from[i].m, aS, matrix[i].m, bS);
}
}
///////////////////////////////////////////////////////////////////
// simulator start
///////////////////////////////////////////////////////////////////
typedef struct Implicit_Data {
/* inputs */
fmatrix3x3 *bigI; /* identity (constant) */
fmatrix3x3 *tfm; /* local coordinate transform */
fmatrix3x3 *M; /* masses */
lfVector *F; /* forces */
fmatrix3x3 *dFdV, *dFdX; /* force jacobians */
int num_blocks; /* number of off-diagonal blocks (springs) */
/* motion state data */
lfVector *X, *Xnew; /* positions */
lfVector *V, *Vnew; /* velocities */
/* internal solver data */
lfVector *B; /* B for A*dV = B */
fmatrix3x3 *A; /* A for A*dV = B */
lfVector *dV; /* velocity change (solution of A*dV = B) */
lfVector *z; /* target velocity in constrained directions */
fmatrix3x3 *S; /* filtering matrix for constraints */
fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */
} Implicit_Data;
Implicit_Data *BPH_mass_spring_solver_create(int numverts, int numsprings)
{
Implicit_Data *id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat");
/* process diagonal elements */
id->tfm = create_bfmatrix(numverts, 0);
id->A = create_bfmatrix(numverts, numsprings);
id->dFdV = create_bfmatrix(numverts, numsprings);
id->dFdX = create_bfmatrix(numverts, numsprings);
id->S = create_bfmatrix(numverts, 0);
id->Pinv = create_bfmatrix(numverts, numsprings);
id->P = create_bfmatrix(numverts, numsprings);
id->bigI = create_bfmatrix(numverts, numsprings); // TODO 0 springs
id->M = create_bfmatrix(numverts, numsprings);
id->X = create_lfvector(numverts);
id->Xnew = create_lfvector(numverts);
id->V = create_lfvector(numverts);
id->Vnew = create_lfvector(numverts);
id->F = create_lfvector(numverts);
id->B = create_lfvector(numverts);
id->dV = create_lfvector(numverts);
id->z = create_lfvector(numverts);
initdiag_bfmatrix(id->bigI, I);
return id;
}
void BPH_mass_spring_solver_free(Implicit_Data *id)
{
del_bfmatrix(id->tfm);
del_bfmatrix(id->A);
del_bfmatrix(id->dFdV);
del_bfmatrix(id->dFdX);
del_bfmatrix(id->S);
del_bfmatrix(id->P);
del_bfmatrix(id->Pinv);
del_bfmatrix(id->bigI);
del_bfmatrix(id->M);
del_lfvector(id->X);
del_lfvector(id->Xnew);
del_lfvector(id->V);
del_lfvector(id->Vnew);
del_lfvector(id->F);
del_lfvector(id->B);
del_lfvector(id->dV);
del_lfvector(id->z);
MEM_freeN(id);
}
/* ==== Transformation from/to root reference frames ==== */
BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3])
{
copy_v3_v3(r, v);
mul_transposed_m3_v3(data->tfm[index].m, r);
}
BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3])
{
mul_v3_m3v3(r, data->tfm[index].m, v);
}
BLI_INLINE void world_to_root_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3])
{
float trot[3][3];
copy_m3_m3(trot, data->tfm[index].m);
transpose_m3(trot);
mul_m3_m3m3(r, trot, m);
}
BLI_INLINE void root_to_world_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3])
{
mul_m3_m3m3(r, data->tfm[index].m, m);
}
/* ================================ */
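/* apply the constraint filter S to a long vector (projects out constrained directions) */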
DO_INLINE void filter(lfVector *V, fmatrix3x3 *S)
{
unsigned int i = 0;
for (i = 0; i < S[0].vcount; i++) {
mul_m3_v3(S[i].m, V[S[i].r]);
}
}
/* this version of the CG algorithm does not work very well with partial constraints
* (where S has non-zero elements). */
# if 0
static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S)
{
// Solves for unknown X in equation AX=B
unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100;
float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */;
lfVector *q, *d, *tmp, *r;
float s, starget, a, s_prev;
unsigned int numverts = lA[0].vcount;
q = create_lfvector(numverts);
d = create_lfvector(numverts);
tmp = create_lfvector(numverts);
r = create_lfvector(numverts);
// zero_lfvector(ldV, CLOTHPARTICLES);
filter(ldV, S);
add_lfvector_lfvector(ldV, ldV, z, numverts);
// r = B - Mul(tmp, A, X); // just use B if X known to be zero
cp_lfvector(r, lB, numverts);
mul_bfmatrix_lfvector(tmp, lA, ldV);
sub_lfvector_lfvector(r, r, tmp, numverts);
filter(r, S);
cp_lfvector(d, r, numverts);
s = dot_lfvector(r, r, numverts);
starget = s * sqrtf(conjgrad_epsilon);
while (s > starget && conjgrad_loopcount < conjgrad_looplimit) {
// Mul(q, A, d); // q = A*d;
mul_bfmatrix_lfvector(q, lA, d);
filter(q, S);
a = s / dot_lfvector(d, q, numverts);
// X = X + d*a;
add_lfvector_lfvectorS(ldV, ldV, d, a, numverts);
// r = r - q*a;
sub_lfvector_lfvectorS(r, r, q, a, numverts);
s_prev = s;
s = dot_lfvector(r, r, numverts);
//d = r+d*(s/s_prev);
add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts);
filter(d, S);
conjgrad_loopcount++;
}
/* conjgrad_lasterror = s; */ /* UNUSED */
del_lfvector(q);
del_lfvector(d);
del_lfvector(tmp);
del_lfvector(r);
// printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);
return conjgrad_loopcount <
conjgrad_looplimit; // true means we reached desired accuracy in given time - ie stable
}
# endif
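/* Conjugate gradient solver for A * dV = B with constraint filtering.
* Following the modified CG of Baraff & Witkin ("Large Steps in Cloth
* Simulation"), constrained degrees of freedom are projected out of the
* residual by filter(), and iteration stops once the residual norm falls
* below epsilon times the norm of the filtered right-hand side. */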
static int cg_filtered(lfVector *ldV,
fmatrix3x3 *lA,
lfVector *lB,
lfVector *z,
fmatrix3x3 *S,
ImplicitSolverResult *result)
{
// Solves for unknown X in equation AX=B
unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100;
float conjgrad_epsilon = 0.01f;
unsigned int numverts = lA[0].vcount;
lfVector *fB = create_lfvector(numverts);
lfVector *AdV = create_lfvector(numverts);
lfVector *r = create_lfvector(numverts);
lfVector *c = create_lfvector(numverts);
lfVector *q = create_lfvector(numverts);
lfVector *s = create_lfvector(numverts);
float bnorm2, delta_new, delta_old, delta_target, alpha;
cp_lfvector(ldV, z, numverts);
/* d0 = filter(B)^T * P * filter(B) */
cp_lfvector(fB, lB, numverts);
filter(fB, S);
bnorm2 = dot_lfvector(fB, fB, numverts);
delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2;
/* r = filter(B - A * dV) */
mul_bfmatrix_lfvector(AdV, lA, ldV);
sub_lfvector_lfvector(r, lB, AdV, numverts);
filter(r, S);
/* c = filter(P^-1 * r) */
cp_lfvector(c, r, numverts);
filter(c, S);
/* delta = r^T * c */
delta_new = dot_lfvector(r, c, numverts);
# ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT
printf("==== A ====\n");
print_bfmatrix(lA);
printf("==== z ====\n");
print_lvector(z, numverts);
printf("==== B ====\n");
print_lvector(lB, numverts);
printf("==== S ====\n");
print_bfmatrix(S);
# endif
while (delta_new > delta_target && conjgrad_loopcount < conjgrad_looplimit) {
mul_bfmatrix_lfvector(q, lA, c);
filter(q, S);
alpha = delta_new / dot_lfvector(c, q, numverts);
add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts);
add_lfvector_lfvectorS(r, r, q, -alpha, numverts);
/* s = P^-1 * r */
cp_lfvector(s, r, numverts);
delta_old = delta_new;
delta_new = dot_lfvector(r, s, numverts);
add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts);
filter(c, S);
conjgrad_loopcount++;
}
# ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT
printf("==== dV ====\n");
print_lvector(ldV, numverts);
printf("========\n");
# endif
del_lfvector(fB);
del_lfvector(AdV);
del_lfvector(r);
del_lfvector(c);
del_lfvector(q);
del_lfvector(s);
// printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);
result->status = conjgrad_loopcount < conjgrad_looplimit ? BPH_SOLVER_SUCCESS :
BPH_SOLVER_NO_CONVERGENCE;
result->iterations = conjgrad_loopcount;
result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f;
return conjgrad_loopcount <
conjgrad_looplimit; // true means we reached desired accuracy in given time - ie stable
}
# if 0
// block diagonalizer
DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv)
{
unsigned int i = 0;
// Take only the diagonal blocks of A
// #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT)
for (i = 0; i < lA[0].vcount; i++) {
// block diagonalizer
cp_fmatrix(P[i].m, lA[i].m);
inverse_fmatrix(Pinv[i].m, P[i].m);
}
}
# if 0
// version 1.3
static int cg_filtered_pre(lfVector *dv,
fmatrix3x3 *lA,
lfVector *lB,
lfVector *z,
fmatrix3x3 *S,
fmatrix3x3 *P,
fmatrix3x3 *Pinv)
{
unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0;
float conjgrad_epsilon = 0.0001; // 0.2 is dt for steps=5
lfVector *r = create_lfvector(numverts);
lfVector *p = create_lfvector(numverts);
lfVector *s = create_lfvector(numverts);
lfVector *h = create_lfvector(numverts);
BuildPPinv(lA, P, Pinv);
filter(dv, S);
add_lfvector_lfvector(dv, dv, z, numverts);
mul_bfmatrix_lfvector(r, lA, dv);
sub_lfvector_lfvector(r, lB, r, numverts);
filter(r, S);
mul_prevfmatrix_lfvector(p, Pinv, r);
filter(p, S);
deltaNew = dot_lfvector(r, p, numverts);
delta0 = deltaNew * sqrt(conjgrad_epsilon);
# ifdef DEBUG_TIME
double start = PIL_check_seconds_timer();
# endif
while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) {
iterations++;
mul_bfmatrix_lfvector(s, lA, p);
filter(s, S);
alpha = deltaNew / dot_lfvector(p, s, numverts);
add_lfvector_lfvectorS(dv, dv, p, alpha, numverts);
add_lfvector_lfvectorS(r, r, s, -alpha, numverts);
mul_prevfmatrix_lfvector(h, Pinv, r);
filter(h, S);
deltaOld = deltaNew;
deltaNew = dot_lfvector(r, h, numverts);
add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts);
filter(p, S);
}
# ifdef DEBUG_TIME
double end = PIL_check_seconds_timer();
printf("cg_filtered_pre time: %f\n", (float)(end - start));
# endif
del_lfvector(h);
del_lfvector(s);
del_lfvector(p);
del_lfvector(r);
printf("iterations: %d\n", iterations);
return iterations < conjgrad_looplimit;
}
# endif
// version 1.4
static int cg_filtered_pre(lfVector *dv,
fmatrix3x3 *lA,
lfVector *lB,
lfVector *z,
fmatrix3x3 *S,
fmatrix3x3 *P,
fmatrix3x3 *Pinv,
fmatrix3x3 *bigI)
{
unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0;
lfVector *r = create_lfvector(numverts);
lfVector *p = create_lfvector(numverts);
lfVector *s = create_lfvector(numverts);
lfVector *h = create_lfvector(numverts);
lfVector *bhat = create_lfvector(numverts);
lfVector *btemp = create_lfvector(numverts);
BuildPPinv(lA, P, Pinv);
initdiag_bfmatrix(bigI, I);
sub_bfmatrix_Smatrix(bigI, bigI, S);
// x = Sx_0+(I-S)z
filter(dv, S);
add_lfvector_lfvector(dv, dv, z, numverts);
// b_hat = S(b-A(I-S)z)
mul_bfmatrix_lfvector(r, lA, z);
mul_bfmatrix_lfvector(bhat, bigI, r);
sub_lfvector_lfvector(bhat, lB, bhat, numverts);
// r = S(b-Ax)
mul_bfmatrix_lfvector(r, lA, dv);
sub_lfvector_lfvector(r, lB, r, numverts);
filter(r, S);
// p = SP^-1r
mul_prevfmatrix_lfvector(p, Pinv, r);
filter(p, S);
// delta0 = bhat^TP^-1bhat
mul_prevfmatrix_lfvector(btemp, Pinv, bhat);
delta0 = dot_lfvector(bhat, btemp, numverts);
// deltaNew = r^T * p
deltaNew = dot_lfvector(r, p, numverts);
# if 0
filter(dv, S);
add_lfvector_lfvector(dv, dv, z, numverts);
mul_bfmatrix_lfvector(r, lA, dv);
sub_lfvector_lfvector(r, lB, r, numverts);
filter(r, S);
mul_prevfmatrix_lfvector(p, Pinv, r);
filter(p, S);
deltaNew = dot_lfvector(r, p, numverts);
delta0 = deltaNew * sqrt(conjgrad_epsilon);
# endif
# ifdef DEBUG_TIME
double start = PIL_check_seconds_timer();
# endif
tol = (0.01 * 0.2);
while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) {
iterations++;
mul_bfmatrix_lfvector(s, lA, p);
filter(s, S);
alpha = deltaNew / dot_lfvector(p, s, numverts);
add_lfvector_lfvectorS(dv, dv, p, alpha, numverts);
add_lfvector_lfvectorS(r, r, s, -alpha, numverts);
mul_prevfmatrix_lfvector(h, Pinv, r);
filter(h, S);
deltaOld = deltaNew;
deltaNew = dot_lfvector(r, h, numverts);
add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts);
filter(p, S);
}
# ifdef DEBUG_TIME
double end = PIL_check_seconds_timer();
printf("cg_filtered_pre time: %f\n", (float)(end - start));
# endif
del_lfvector(btemp);
del_lfvector(bhat);
del_lfvector(h);
del_lfvector(s);
del_lfvector(p);
del_lfvector(r);
// printf("iterations: %d\n", iterations);
return iterations < conjgrad_looplimit;
}
# endif
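/* Assemble and solve the backward Euler system
* (M - dt * dFdV - dt^2 * dFdX) * dV = dt * (F + dt * dFdX * V)
* (as in Baraff & Witkin), then advance Vnew = V + dV. */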
bool BPH_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result)
{
unsigned int numverts = data->dFdV[0].vcount;
lfVector *dFdXmV = create_lfvector(numverts);
zero_lfvector(data->dV, numverts);
cp_bfmatrix(data->A, data->M);
subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt));
mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V);
add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts);
# ifdef DEBUG_TIME
double start = PIL_check_seconds_timer();
# endif
cg_filtered(data->dV,
data->A,
data->B,
data->z,
data->S,
result); /* conjugate gradient algorithm to solve Ax=b */
// cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI);
# ifdef DEBUG_TIME
double end = PIL_check_seconds_timer();
printf("cg_filtered calc time: %f\n", (float)(end - start));
# endif
// advance velocities
add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts);
del_lfvector(dFdXmV);
return result->status == BPH_SOLVER_SUCCESS;
}
bool BPH_mass_spring_solve_positions(Implicit_Data *data, float dt)
{
int numverts = data->M[0].vcount;
// advance positions
add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts);
return true;
}
void BPH_mass_spring_apply_result(Implicit_Data *data)
{
int numverts = data->M[0].vcount;
cp_lfvector(data->X, data->Xnew, numverts);
cp_lfvector(data->V, data->Vnew, numverts);
}
void BPH_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass)
{
unit_m3(data->M[index].m);
mul_m3_fl(data->M[index].m, mass);
}
void BPH_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3])
{
# ifdef CLOTH_ROOT_FRAME
copy_m3_m3(data->tfm[index].m, tfm);
# else
unit_m3(data->tfm[index].m);
(void)tfm;
# endif
}
void BPH_mass_spring_set_motion_state(Implicit_Data *data,
int index,
const float x[3],
const float v[3])
{
world_to_root_v3(data, index, data->X[index], x);
world_to_root_v3(data, index, data->V[index], v);
}
void BPH_mass_spring_set_position(Implicit_Data *data, int index, const float x[3])
{
world_to_root_v3(data, index, data->X[index], x);
}
void BPH_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3])
{
world_to_root_v3(data, index, data->V[index], v);
}
void BPH_mass_spring_get_motion_state(struct Implicit_Data *data,
int index,
float x[3],
float v[3])
{
if (x) {
root_to_world_v3(data, index, x, data->X[index]);
}
if (v) {
root_to_world_v3(data, index, v, data->V[index]);
}
}
void BPH_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3])
{
root_to_world_v3(data, index, x, data->X[index]);
}
void BPH_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3])
{
root_to_world_v3(data, index, x, data->Xnew[index]);
}
void BPH_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3])
{
world_to_root_v3(data, index, data->Xnew[index], x);
}
void BPH_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3])
{
root_to_world_v3(data, index, v, data->Vnew[index]);
}
void BPH_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3])
{
world_to_root_v3(data, index, data->Vnew[index], v);
}
/* -------------------------------- */
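/* register the next off-diagonal (spring) block between v1 and v2 and return its index */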
static int BPH_mass_spring_add_block(Implicit_Data *data, int v1, int v2)
{
int s = data->M[0].vcount + data->num_blocks; /* index from array start */
BLI_assert(s < data->M[0].vcount + data->M[0].scount);
++data->num_blocks;
/* tfm and S don't have spring entries (diagonal blocks only) */
init_fmatrix(data->bigI + s, v1, v2);
init_fmatrix(data->M + s, v1, v2);
init_fmatrix(data->dFdX + s, v1, v2);
init_fmatrix(data->dFdV + s, v1, v2);
init_fmatrix(data->A + s, v1, v2);
init_fmatrix(data->P + s, v1, v2);
init_fmatrix(data->Pinv + s, v1, v2);
return s;
}
void BPH_mass_spring_clear_constraints(Implicit_Data *data)
{
int i, numverts = data->S[0].vcount;
for (i = 0; i < numverts; ++i) {
unit_m3(data->S[i].m);
zero_v3(data->z[i]);
}
}
void BPH_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3])
{
zero_m3(data->S[index].m);
world_to_root_v3(data, index, data->z[index], dV);
}
void BPH_mass_spring_add_constraint_ndof1(
Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3])
{
float m[3][3], p[3], q[3], u[3], cmat[3][3];
world_to_root_v3(data, index, p, c1);
mul_fvectorT_fvector(cmat, p, p);
sub_m3_m3m3(m, I, cmat);
world_to_root_v3(data, index, q, c2);
mul_fvectorT_fvector(cmat, q, q);
sub_m3_m3m3(m, m, cmat);
/* XXX not sure but multiplication should work here */
copy_m3_m3(data->S[index].m, m);
// mul_m3_m3m3(data->S[index].m, data->S[index].m, m);
world_to_root_v3(data, index, u, dV);
add_v3_v3(data->z[index], u);
}
void BPH_mass_spring_add_constraint_ndof2(Implicit_Data *data,
int index,
const float c1[3],
const float dV[3])
{
float m[3][3], p[3], u[3], cmat[3][3];
world_to_root_v3(data, index, p, c1);
mul_fvectorT_fvector(cmat, p, p);
sub_m3_m3m3(m, I, cmat);
copy_m3_m3(data->S[index].m, m);
// mul_m3_m3m3(data->S[index].m, data->S[index].m, m);
world_to_root_v3(data, index, u, dV);
add_v3_v3(data->z[index], u);
}
void BPH_mass_spring_clear_forces(Implicit_Data *data)
{
int numverts = data->M[0].vcount;
zero_lfvector(data->F, numverts);
init_bfmatrix(data->dFdX, ZERO);
init_bfmatrix(data->dFdV, ZERO);
data->num_blocks = 0;
}
void BPH_mass_spring_force_reference_frame(Implicit_Data *data,
int index,
const float acceleration[3],
const float omega[3],
const float domega_dt[3],
float mass)
{
# ifdef CLOTH_ROOT_FRAME
float acc[3], w[3], dwdt[3];
float f[3], dfdx[3][3], dfdv[3][3];
float euler[3], coriolis[3], centrifugal[3], rotvel[3];
float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3];
world_to_root_v3(data, index, acc, acceleration);
world_to_root_v3(data, index, w, omega);
world_to_root_v3(data, index, dwdt, domega_dt);
cross_v3_v3v3(euler, dwdt, data->X[index]);
cross_v3_v3v3(coriolis, w, data->V[index]);
mul_v3_fl(coriolis, 2.0f);
cross_v3_v3v3(rotvel, w, data->X[index]);
cross_v3_v3v3(centrifugal, w, rotvel);
sub_v3_v3v3(f, acc, euler);
sub_v3_v3(f, coriolis);
sub_v3_v3(f, centrifugal);
mul_v3_fl(f, mass); /* F = m * a */
cross_v3_identity(deuler, dwdt);
cross_v3_identity(dcoriolis, w);
mul_m3_fl(dcoriolis, 2.0f);
cross_v3_identity(drotvel, w);
cross_m3_v3m3(dcentrifugal, w, drotvel);
add_m3_m3m3(dfdx, deuler, dcentrifugal);
negate_m3(dfdx);
mul_m3_fl(dfdx, mass);
copy_m3_m3(dfdv, dcoriolis);
negate_m3(dfdv);
mul_m3_fl(dfdv, mass);
add_v3_v3(data->F[index], f);
add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx);
add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv);
# else
(void)data;
(void)index;
(void)acceleration;
(void)omega;
(void)domega_dt;
# endif
}
void BPH_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3])
{
/* force = mass * acceleration (in this case: gravity) */
float f[3];
world_to_root_v3(data, index, f, g);
mul_v3_fl(f, mass);
add_v3_v3(data->F[index], f);
}
void BPH_mass_spring_force_drag(Implicit_Data *data, float drag)
{
int i, numverts = data->M[0].vcount;
for (i = 0; i < numverts; i++) {
float tmp[3][3];
/* NB: uses root space velocity, no need to transform */
madd_v3_v3fl(data->F[i], data->V[i], -drag);
copy_m3_m3(tmp, I);
mul_m3_fl(tmp, -drag);
add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tmp);
}
}
void BPH_mass_spring_force_extern(
struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
float tf[3], tdfdx[3][3], tdfdv[3][3];
world_to_root_v3(data, i, tf, f);
world_to_root_m3(data, i, tdfdx, dfdx);
world_to_root_m3(data, i, tdfdv, dfdv);
add_v3_v3(data->F[i], tf);
add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, tdfdx);
add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tdfdv);
}
static float calc_nor_area_tri(float nor[3],
const float v1[3],
const float v2[3],
const float v3[3])
{
float n1[3], n2[3];
sub_v3_v3v3(n1, v1, v2);
sub_v3_v3v3(n2, v2, v3);
cross_v3_v3v3(nor, n1, n2);
return normalize_v3(nor);
}
/* XXX does not support force jacobians yet, since the effector system does not provide them either
*/
void BPH_mass_spring_force_face_wind(
Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3])
{
const float effector_scale = 0.02f;
float win[3], nor[3], area;
float factor;
/* calculate face normal and area */
area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
factor = effector_scale * area / 3.0f;
world_to_root_v3(data, v1, win, winvec[v1]);
madd_v3_v3fl(data->F[v1], nor, factor * dot_v3v3(win, nor));
world_to_root_v3(data, v2, win, winvec[v2]);
madd_v3_v3fl(data->F[v2], nor, factor * dot_v3v3(win, nor));
world_to_root_v3(data, v3, win, winvec[v3]);
madd_v3_v3fl(data->F[v3], nor, factor * dot_v3v3(win, nor));
}
static void edge_wind_vertex(const float dir[3],
float length,
float radius,
const float wind[3],
float f[3],
float UNUSED(dfdx[3][3]),
float UNUSED(dfdv[3][3]))
{
const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */
float cos_alpha, sin_alpha, cross_section;
float windlen = len_v3(wind);
if (windlen == 0.0f) {
zero_v3(f);
return;
}
/* angle of wind direction to edge */
cos_alpha = dot_v3v3(wind, dir) / windlen;
sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha);
cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha);
mul_v3_v3fl(f, wind, density * cross_section);
}
void BPH_mass_spring_force_edge_wind(
Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3])
{
float win[3], dir[3], length;
float f[3], dfdx[3][3], dfdv[3][3];
sub_v3_v3v3(dir, data->X[v1], data->X[v2]);
length = normalize_v3(dir);
world_to_root_v3(data, v1, win, winvec[v1]);
edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv);
add_v3_v3(data->F[v1], f);
world_to_root_v3(data, v2, win, winvec[v2]);
edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv);
add_v3_v3(data->F[v2], f);
}
void BPH_mass_spring_force_vertex_wind(Implicit_Data *data,
int v,
float UNUSED(radius),
const float (*winvec)[3])
{
const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */
float wind[3];
float f[3];
world_to_root_v3(data, v, wind, winvec[v]);
mul_v3_v3fl(f, wind, density);
add_v3_v3(data->F[v], f);
}
BLI_INLINE void dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k)
{
// dir is unit length direction, rest is spring's restlength, k is spring constant.
// return ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k;
outerproduct(to, dir, dir);
sub_m3_m3m3(to, I, to);
mul_m3_fl(to, (L / length));
sub_m3_m3m3(to, to, I);
mul_m3_fl(to, k);
}
/* unused */
# if 0
BLI_INLINE void dfdx_damp(float to[3][3],
const float dir[3],
float length,
const float vel[3],
float rest,
float damping)
{
// inner spring damping vel is the relative velocity of the endpoints.
// return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest)));
mul_fvectorT_fvector(to, dir, dir);
sub_fmatrix_fmatrix(to, I, to);
mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest))));
}
# endif
BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping)
{
// derivative of force wrt velocity
outerproduct(to, dir, dir);
mul_m3_fl(to, -damping);
}
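/* polynomial bending response f_b from "Stable but Responsive Cloth"
* (Choi, Ko), evaluated at x = length / L */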
BLI_INLINE float fb(float length, float L)
{
float x = length / L;
float xx = x * x;
float xxx = xx * x;
float xxxx = xxx * x;
return (-11.541f * xxxx + 34.193f * xxx - 39.083f * xx + 23.116f * x - 9.713f);
}
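/* derivative of fb with respect to x = length / L */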
BLI_INLINE float fbderiv(float length, float L)
{
float x = length / L;
float xx = x * x;
float xxx = xx * x;
return (-46.164f * xxx + 102.579f * xx - 78.166f * x + 23.116f);
}
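/* combined bending force magnitude: the larger of the polynomial term
* kb * fb() and the linear term cb * (length - L) */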
BLI_INLINE float fbstar(float length, float L, float kb, float cb)
{
float tempfb_fl = kb * fb(length, L);
float fbstar_fl = cb * (length - L);
if (tempfb_fl < fbstar_fl) {
return fbstar_fl;
}
else {
return tempfb_fl;
}
}
// function to calculate bending spring force (taken from Choi & Ko)
BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb)
{
float tempfb_fl = kb * fb(length, L);
float fbstar_fl = cb * (length - L);
if (tempfb_fl < fbstar_fl) {
return -cb;
}
else {
return -kb * fbderiv(length, L);
}
}
/* calculate elongation */
BLI_INLINE bool spring_length(Implicit_Data *data,
int i,
int j,
float r_extent[3],
float r_dir[3],
float *r_length,
float r_vel[3])
{
sub_v3_v3v3(r_extent, data->X[j], data->X[i]);
sub_v3_v3v3(r_vel, data->V[j], data->V[i]);
*r_length = len_v3(r_extent);
if (*r_length > ALMOST_ZERO) {
# if 0
if (length > L) {
if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) &&
(((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) {
// cut spring!
s->flags |= CSPRING_FLAG_DEACTIVATE;
return false;
}
}
# endif
mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length));
}
else {
zero_v3(r_dir);
}
return true;
}
BLI_INLINE void apply_spring(
Implicit_Data *data, int i, int j, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
int block_ij = BPH_mass_spring_add_block(data, i, j);
add_v3_v3(data->F[i], f);
sub_v3_v3(data->F[j], f);
add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx);
sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx);
add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);
add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv);
sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv);
}
bool BPH_mass_spring_force_spring_linear(Implicit_Data *data,
int i,
int j,
float restlen,
float stiffness_tension,
float damping_tension,
float stiffness_compression,
float damping_compression,
bool resist_compress,
bool new_compress,
float clamp_force)
{
float extent[3], length, dir[3], vel[3];
float f[3], dfdx[3][3], dfdv[3][3];
float damping = 0;
// calculate elongation
spring_length(data, i, j, extent, dir, &length, vel);
/* This code computes not only the force, but also its derivative.
* Zero derivative effectively disables the spring for the implicit solver.
* Thus length > restlen makes cloth unconstrained at the start of simulation. */
if ((length >= restlen && length > 0) || resist_compress) {
float stretch_force;
damping = damping_tension;
stretch_force = stiffness_tension * (length - restlen);
if (clamp_force > 0.0f && stretch_force > clamp_force) {
stretch_force = clamp_force;
}
mul_v3_v3fl(f, dir, stretch_force);
dfdx_spring(dfdx, dir, length, restlen, stiffness_tension);
}
else if (new_compress) {
/* This is based on the Choi and Ko bending model,
* which works surprisingly well for compression. */
float kb = stiffness_compression;
float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */
damping = damping_compression;
mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));
outerproduct(dfdx, dir, dir);
mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));
}
else {
return false;
}
madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));
dfdv_damp(dfdv, dir, damping);
apply_spring(data, i, j, f, dfdx, dfdv);
return true;
}
/* See "Stable but Responsive Cloth" (Choi, Ko 2005) */
bool BPH_mass_spring_force_spring_bending(
Implicit_Data *data, int i, int j, float restlen, float kb, float cb)
{
float extent[3], length, dir[3], vel[3];
// calculate elongation
spring_length(data, i, j, extent, dir, &length, vel);
if (length < restlen) {
float f[3], dfdx[3][3], dfdv[3][3];
mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));
outerproduct(dfdx, dir, dir);
mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));
/* XXX damping not supported */
zero_m3(dfdv);
apply_spring(data, i, j, f, dfdx, dfdv);
return true;
}
else {
return false;
}
}
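/* average of len vectors selected by the index list inds */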
BLI_INLINE void poly_avg(lfVector *data, int *inds, int len, float r_avg[3])
{
float fact = 1.0f / (float)len;
zero_v3(r_avg);
for (int i = 0; i < len; i++) {
madd_v3_v3fl(r_avg, data[inds[i]], fact);
}
}
BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3])
{
float mid[3];
poly_avg(data, inds, len, mid);
normal_tri_v3(r_dir, data[i], data[j], mid);
}
BLI_INLINE void edge_avg(lfVector *data, int i, int j, float r_avg[3])
{
r_avg[0] = (data[i][0] + data[j][0]) * 0.5f;
r_avg[1] = (data[i][1] + data[j][1]) * 0.5f;
r_avg[2] = (data[i][2] + data[j][2]) * 0.5f;
}
BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3])
{
sub_v3_v3v3(r_dir, data[i], data[j]);
normalize_v3(r_dir);
}
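/* signed bending angle between dir_a and dir_b, measured around the edge axis dir_e */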
BLI_INLINE float bend_angle(float dir_a[3], float dir_b[3], float dir_e[3])
{
float cos, sin;
float tmp[3];
cos = dot_v3v3(dir_a, dir_b);
cross_v3_v3v3(tmp, dir_a, dir_b);
sin = dot_v3v3(tmp, dir_e);
return atan2f(sin, cos);
}
BLI_INLINE void spring_angle(Implicit_Data *data,
int i,
int j,
int *i_a,
int *i_b,
int len_a,
int len_b,
float r_dir_a[3],
float r_dir_b[3],
float *r_angle,
float r_vel_a[3],
float r_vel_b[3])
{
float dir_e[3], vel_e[3];
poly_norm(data->X, j, i, i_a, len_a, r_dir_a);
poly_norm(data->X, i, j, i_b, len_b, r_dir_b);
edge_norm(data->X, i, j, dir_e);
*r_angle = bend_angle(r_dir_a, r_dir_b, dir_e);
poly_avg(data->V, i_a, len_a, r_vel_a);
poly_avg(data->V, i_b, len_b, r_vel_b);
edge_avg(data->V, i, j, vel_e);
sub_v3_v3(r_vel_a, vel_e);
sub_v3_v3(r_vel_b, vel_e);
}
/* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps
* in Cloth Simulation". */
bool BPH_mass_spring_force_spring_angular(Implicit_Data *data,
int i,
int j,
int *i_a,
int *i_b,
int len_a,
int len_b,
float restang,
float stiffness,
float damping)
{
float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3];
float f_a[3], f_b[3], f_e[3];
float force;
int x;
spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b);
/* spring force */
force = stiffness * (angle - restang);
/* damping force */
force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b));
mul_v3_v3fl(f_a, dir_a, force / len_a);
mul_v3_v3fl(f_b, dir_b, force / len_b);
for (x = 0; x < len_a; x++) {
add_v3_v3(data->F[i_a[x]], f_a);
}
for (x = 0; x < len_b; x++) {
add_v3_v3(data->F[i_b[x]], f_b);
}
mul_v3_v3fl(f_a, dir_a, force * 0.5f);
mul_v3_v3fl(f_b, dir_b, force * 0.5f);
add_v3_v3v3(f_e, f_a, f_b);
sub_v3_v3(data->F[i], f_e);
sub_v3_v3(data->F[j], f_e);
return true;
}
/* Jacobian of a direction vector.
* Basically the part of the differential orthogonal to the direction,
* inversely proportional to the length of the edge.
*
* dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij
*/
BLI_INLINE void spring_grad_dir(
Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3])
{
float length;
sub_v3_v3v3(edge, data->X[j], data->X[i]);
length = normalize_v3_v3(dir, edge);
if (length > ALMOST_ZERO) {
outerproduct(grad_dir, dir, dir);
sub_m3_m3m3(grad_dir, I, grad_dir);
mul_m3_fl(grad_dir, 1.0f / length);
}
else {
zero_m3(grad_dir);
}
}
BLI_INLINE void spring_hairbend_forces(Implicit_Data *data,
int i,
int j,
int k,
const float goal[3],
float stiffness,
float damping,
int q,
const float dx[3],
const float dv[3],
float r_f[3])
{
float edge_ij[3], dir_ij[3];
float edge_jk[3], dir_jk[3];
float vel_ij[3], vel_jk[3], vel_ortho[3];
float f_bend[3], f_damp[3];
float fk[3];
float dist[3];
zero_v3(fk);
sub_v3_v3v3(edge_ij, data->X[j], data->X[i]);
if (q == i) {
sub_v3_v3(edge_ij, dx);
}
if (q == j) {
add_v3_v3(edge_ij, dx);
}
normalize_v3_v3(dir_ij, edge_ij);
sub_v3_v3v3(edge_jk, data->X[k], data->X[j]);
if (q == j) {
sub_v3_v3(edge_jk, dx);
}
if (q == k) {
add_v3_v3(edge_jk, dx);
}
normalize_v3_v3(dir_jk, edge_jk);
sub_v3_v3v3(vel_ij, data->V[j], data->V[i]);
if (q == i) {
sub_v3_v3(vel_ij, dv);
}
if (q == j) {
add_v3_v3(vel_ij, dv);
}
sub_v3_v3v3(vel_jk, data->V[k], data->V[j]);
if (q == j) {
sub_v3_v3(vel_jk, dv);
}
if (q == k) {
add_v3_v3(vel_jk, dv);
}
/* bending force */
sub_v3_v3v3(dist, goal, edge_jk);
mul_v3_v3fl(f_bend, dist, stiffness);
add_v3_v3(fk, f_bend);
/* damping force */
madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk));
mul_v3_v3fl(f_damp, vel_ortho, damping);
sub_v3_v3(fk, f_damp);
copy_v3_v3(r_f, fk);
}
/* Finite Differences method for estimating the jacobian of the force */
BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data,
int i,
int j,
int k,
const float goal[3],
float stiffness,
float damping,
int q,
float dfdx[3][3])
{
const float delta = 0.00001f; // TODO find a good heuristic for this
float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3];
float f[3];
int a, b;
zero_m3(dvec_null);
unit_m3(dvec_pos);
mul_m3_fl(dvec_pos, delta * 0.5f);
copy_m3_m3(dvec_neg, dvec_pos);
negate_m3(dvec_neg);
/* XXX TODO offset targets to account for position dependency */
for (a = 0; a < 3; ++a) {
spring_hairbend_forces(
data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f);
copy_v3_v3(dfdx[a], f);
spring_hairbend_forces(
data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f);
sub_v3_v3(dfdx[a], f);
for (b = 0; b < 3; ++b) {
dfdx[a][b] /= delta;
}
}
}
/* Finite Differences method for estimating the jacobian of the force */
BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data,
int i,
int j,
int k,
const float goal[3],
float stiffness,
float damping,
int q,
float dfdv[3][3])
{
const float delta = 0.00001f; // TODO find a good heuristic for this
float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3];
float f[3];
int a, b;
zero_m3(dvec_null);
unit_m3(dvec_pos);
mul_m3_fl(dvec_pos, delta * 0.5f);
copy_m3_m3(dvec_neg, dvec_pos);
negate_m3(dvec_neg);
/* XXX TODO offset targets to account for position dependency */
for (a = 0; a < 3; ++a) {
spring_hairbend_forces(
data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f);
copy_v3_v3(dfdv[a], f);
spring_hairbend_forces(
data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f);
sub_v3_v3(dfdv[a], f);
for (b = 0; b < 3; ++b) {
dfdv[a][b] /= delta;
}
}
}
/* Angular spring that pulls the vertex toward the local target
* See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a)
*/
bool BPH_mass_spring_force_spring_bending_hair(Implicit_Data *data,
int i,
int j,
int k,
const float target[3],
float stiffness,
float damping)
{
float goal[3];
float fj[3], fk[3];
float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3];
float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3];
const float vecnull[3] = {0.0f, 0.0f, 0.0f};
int block_ij = BPH_mass_spring_add_block(data, i, j);
int block_jk = BPH_mass_spring_add_block(data, j, k);
int block_ik = BPH_mass_spring_add_block(data, i, k);
world_to_root_v3(data, j, goal, target);
spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk);
negate_v3_v3(fj, fk); /* counterforce */
spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi);
spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj);
spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk);
copy_m3_m3(dfj_dxi, dfk_dxi);
negate_m3(dfj_dxi);
copy_m3_m3(dfj_dxj, dfk_dxj);
negate_m3(dfj_dxj);
spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi);
spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj);
spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk);
copy_m3_m3(dfj_dvi, dfk_dvi);
negate_m3(dfj_dvi);
copy_m3_m3(dfj_dvj, dfk_dvj);
negate_m3(dfj_dvj);
/* add forces and jacobians to the solver data */
add_v3_v3(data->F[j], fj);
add_v3_v3(data->F[k], fk);
add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj);
add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk);
add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi);
add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj);
add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi);
add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj);
add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk);
add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi);
add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj);
add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi);
/* XXX analytical calculation of derivatives below is incorrect.
* This proved to be difficult, but for now just using the finite difference method for
* estimating the jacobians should be sufficient.
*/
# if 0
float edge_ij[3], dir_ij[3], grad_dir_ij[3][3];
float edge_jk[3], dir_jk[3], grad_dir_jk[3][3];
float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3];
float target[3];
float tmp[3][3];
float fi[3], fj[3], fk[3];
float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3];
float dfdvi[3][3];
// TESTING
damping = 0.0f;
zero_v3(fi);
zero_v3(fj);
zero_v3(fk);
zero_m3(dfi_dxi);
zero_m3(dfj_dxi);
zero_m3(dfk_dxi);
zero_m3(dfk_dxj);
zero_m3(dfk_dxk);
/* jacobian of direction vectors */
spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij);
spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk);
sub_v3_v3v3(vel_jk, data->V[k], data->V[j]);
/* bending force */
mul_v3_v3fl(target, dir_ij, restlen);
sub_v3_v3v3(dist, target, edge_jk);
mul_v3_v3fl(fk, dist, stiffness);
/* damping force */
madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk));
madd_v3_v3fl(fk, vel_jk_ortho, damping);
/* XXX this only holds true as long as we assume straight rest shape!
* eventually will become a bit more involved since the opposite segment
* gets its own target, under condition of having equal torque on both sides.
*/
copy_v3_v3(fi, fk);
/* counterforce on the middle point */
sub_v3_v3(fj, fi);
sub_v3_v3(fj, fk);
/* === derivatives === */
madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen);
madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen);
madd_m3_m3fl(dfk_dxj, I, stiffness);
madd_m3_m3fl(dfk_dxk, I, -stiffness);
copy_m3_m3(dfi_dxi, dfk_dxk);
negate_m3(dfi_dxi);
/* dfj_dfi == dfi_dfj due to symmetry,
* dfi_dfj == dfk_dfj due to fi == fk
* XXX see comment above on future bent rest shapes
*/
copy_m3_m3(dfj_dxi, dfk_dxj);
/* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */
sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi);
sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj);
/* add forces and jacobians to the solver data */
add_v3_v3(data->F[i], fi);
add_v3_v3(data->F[j], fj);
add_v3_v3(data->F[k], fk);
add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi);
add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj);
add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk);
add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi);
add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj);
add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi);
# endif
return true;
}
bool BPH_mass_spring_force_spring_goal(Implicit_Data *data,
int i,
const float goal_x[3],
const float goal_v[3],
float stiffness,
float damping)
{
float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3];
float f[3], dfdx[3][3], dfdv[3][3];
/* goal is in world space */
world_to_root_v3(data, i, root_goal_x, goal_x);
world_to_root_v3(data, i, root_goal_v, goal_v);
sub_v3_v3v3(extent, root_goal_x, data->X[i]);
sub_v3_v3v3(vel, root_goal_v, data->V[i]);
length = normalize_v3_v3(dir, extent);
if (length > ALMOST_ZERO) {
mul_v3_v3fl(f, dir, stiffness * length);
// Ascher & Boxman, p.21: Damping only during elongation
// something wrong with it...
madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));
dfdx_spring(dfdx, dir, length, 0.0f, stiffness);
dfdv_damp(dfdv, dir, damping);
add_v3_v3(data->F[i], f);
add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);
return true;
}
else {
return false;
}
}
#endif /* IMPLICIT_SOLVER_BLENDER */
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
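/* Driver: reads the grid size (Nx, Ny, Nz) and time-step count Nt from the
* command line, allocates two time buffers plus the coefficient field roc2,
* runs the 25-point stencil sweep Nt times per test, and tracks the fastest
* of TESTS runs. */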
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
else {
fprintf(stderr, "Usage: %s Nx Ny Nz [Nt]\n", argv[0]);
return 1;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
double ***roc2;
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 16;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
rkb_screen.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <complex.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
#define LL 0
#define SS 1
#define SL 2
#define LS 3
int int2e_spinor();
int int2e_spsp1spsp2_spinor();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
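/* Schwarz-type prescreening for LLLL integrals: keep a shell quartet
* (ij|kl) only if q_ij * q_kl exceeds the direct-SCF cutoff and at least
* one of the related density matrix bounds is large enough to matter. */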
int CVHFrkbllll_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((opt->dm_cond[j*n+i] > dmin)
|| (opt->dm_cond[l*n+k] > dmin)
|| (opt->dm_cond[j*n+k] > dmin)
|| (opt->dm_cond[j*n+l] > dmin)
|| (opt->dm_cond[i*n+k] > dmin)
|| (opt->dm_cond[i*n+l] > dmin));
}
int CVHFrkbllll_vkscreen(int *shls, CVHFOpt *opt,
double **dms_cond, int n_dm, double *dm_atleast,
int *atm, int *bas, double *env)
{
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int nbas = opt->nbas;
int idm;
double qijkl = opt->q_cond[i*nbas+j] * opt->q_cond[k*nbas+l];
double *pdmscond = opt->dm_cond + nbas*nbas;
for (idm = 0; idm < (n_dm+1)/2; idm++) {
// note in _vhf.rdirect_mapdm, J and K share the same DM
dms_cond[idm*2+0] = pdmscond + idm*nbas*nbas; // for vj
dms_cond[idm*2+1] = pdmscond + idm*nbas*nbas; // for vk
}
*dm_atleast = opt->direct_scf_cutoff / qijkl;
return 1;
}
int CVHFrkbssll_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double *dmsl = opt->dm_cond + n*n*SL;
double qijkl = opt->q_cond[n*n*SS+i*n+j] * opt->q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((opt->dm_cond[n*n*SS+j*n+i] > dmin)
|| (opt->dm_cond[l*n+k] > dmin)
|| (dmsl[j*n+k] > dmin)
|| (dmsl[j*n+l] > dmin)
|| (dmsl[i*n+k] > dmin)
|| (dmsl[i*n+l] > dmin));
}
// be careful with the order in dms_cond; the current order (dmll, dmss, dmsl)
// is consistent with the function _call_veff_ssll in dhf.py
int CVHFrkbssll_vkscreen(int *shls, CVHFOpt *opt,
double **dms_cond, int n_dm, double *dm_atleast,
int *atm, int *bas, double *env)
{
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int nbas = opt->nbas;
int idm;
double qijkl = opt->q_cond[nbas*nbas*SS+i*nbas+j] * opt->q_cond[k*nbas+l];
double *pdmscond = opt->dm_cond + 4*nbas*nbas;
int nset = (n_dm+2) / 3;
double *dmscondll = pdmscond + nset*nbas*nbas*LL;
double *dmscondss = pdmscond + nset*nbas*nbas*SS;
double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
for (idm = 0; idm < nset; idm++) {
dms_cond[nset*0+idm] = dmscondll + idm*nbas*nbas;
dms_cond[nset*1+idm] = dmscondss + idm*nbas*nbas;
dms_cond[nset*2+idm] = dmscondsl + idm*nbas*nbas;
}
*dm_atleast = opt->direct_scf_cutoff / qijkl;
return 1;
}
static void set_qcond(int (*intor)(), CINTOpt *cintopt, double *qcond,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel default(none) \
shared(intor, cintopt, qcond, ao_loc, atm, natm, bas, nbas, env)
{
double qtmp, tmp;
int i, j, ij, di, dj, ish, jsh;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
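/* size buf for the largest shell block, so any (ij|ij) quartet fits */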
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
double complex *buf = malloc(sizeof(double complex) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
jsh = ij - ish*(ish+1)/2;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
qtmp = 1e-100;
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
tmp = cabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
qtmp = MAX(qtmp, tmp);
} }
qtmp = sqrt(qtmp);
}
qcond[ish*nbas+jsh] = qtmp;
qcond[jsh*nbas+ish] = qtmp;
}
free(buf);
free(cache);
}
}
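/* Editorial sketch: the ij -> (ish,jsh) decoding above inverts the
 * lower-triangular pair index ij = ish*(ish+1)/2 + jsh (0 <= jsh <= ish),
 * giving ish = floor(sqrt(2*ij + 1/4) - 1/2); the 1e-7 offset guards
 * against floating-point round-off. A quick self-check of the mapping: */
static void check_tril_unpack(int npair)
{
        int ij;
        for (ij = 0; ij < npair; ij++) {
                int ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
                int jsh = ij - ish*(ish+1)/2;
                assert(jsh >= 0 && jsh <= ish);
        }
}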
void CVHFrkbllll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
assert(intor == &int2e_spinor);
set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
}
void CVHFrkbssss_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
assert(intor == &int2e_spsp1spsp2_spinor);
set_qcond(intor, cintopt, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
double c1 = .25/(env[PTR_LIGHT_SPEED]*env[PTR_LIGHT_SPEED]);
double *qcond = opt->q_cond;
int i;
for (i = 0; i < nbas*nbas; i++) {
qcond[i] *= c1;
}
}
void CVHFrkbssll_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas*2);
set_qcond(&int2e_spinor, NULL, opt->q_cond, ao_loc, atm, natm, bas, nbas, env);
set_qcond(&int2e_spsp1spsp2_spinor, NULL, opt->q_cond+nbas*nbas, ao_loc,
atm, natm, bas, nbas, env);
double c1 = .25/(env[PTR_LIGHT_SPEED]*env[PTR_LIGHT_SPEED]);
double *qcond = opt->q_cond + nbas*nbas;
int i;
for (i = 0; i < nbas*nbas; i++) {
qcond[i] *= c1;
}
}
static void set_dmcond(double *dmcond, double *dmscond, double complex *dm,
double direct_scf_cutoff, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
const int nao = ao_loc[nbas];
double dmax, dmaxi, tmp;
int i, j, ish, jsh;
int iset;
double complex *pdm;
for (ish = 0; ish < nbas; ish++) {
for (jsh = 0; jsh < nbas; jsh++) {
dmax = 0;
for (iset = 0; iset < nset; iset++) {
dmaxi = 0;
pdm = dm + nao*nao*iset;
for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
tmp = cabs(pdm[i*nao+j]);
dmaxi = MAX(dmaxi, tmp);
} }
dmscond[iset*nbas*nbas+ish*nbas+jsh] = dmaxi;
dmax = MAX(dmax, dmaxi);
}
dmcond[ish*nbas+jsh] = dmax;
} }
}
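/* Editorial summary: for every shell pair (ish,jsh), set_dmcond records the
 * largest |dm[i,j]| over the corresponding AO block of each density matrix
 * (one nbas*nbas slice of dmscond per matrix) and the maximum across all
 * matrices (dmcond); these are the dm_cond values tested by the prescreen
 * routines above. */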
// dm_cond stores 1+nset blocks: the combined dm_cond followed by the per-dm dms_cond
void CVHFrkbllll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->dm_cond) { // do NOT reuse opt->dm_cond because nset may differ between calls
free(opt->dm_cond);
}
opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*(1+nset));
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*(1+nset));
// dmcond followed by dmscond, which hold the max matrix element of each dm
set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
void CVHFrkbssss_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->dm_cond) {
free(opt->dm_cond);
}
opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*(1+nset));
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*(1+nset));
set_dmcond(opt->dm_cond, opt->dm_cond+nbas*nbas, dm,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
// the current order of dmscond (dmll, dmss, dmsl) is consistent with the
// function _call_veff_ssll in dhf.py
void CVHFrkbssll_direct_scf_dm(CVHFOpt *opt, double complex *dm, int nset,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->dm_cond) {
free(opt->dm_cond);
}
if (nset < 3) {
fprintf(stderr, "At least 3 sets of DMs (dmll,dmss,dmsl) are "
"required to set rkb prescreening\n");
exit(1);
}
nset = nset / 3;
opt->dm_cond = (double *)malloc(sizeof(double)*nbas*nbas*4*(1+nset));
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas*4*(1+nset));
// 4 types of dmcond (LL,SS,SL,LS) followed by 4 types of dmscond
int n2c = CINTtot_cgto_spinor(bas, nbas);
double *dmcondll = opt->dm_cond + nbas*nbas*LL;
double *dmcondss = opt->dm_cond + nbas*nbas*SS;
double *dmcondsl = opt->dm_cond + nbas*nbas*SL;
//double *dmcondls = opt->dm_cond + nbas*nbas*LS;
double *pdmscond = opt->dm_cond + nbas*nbas*4;
double *dmscondll = pdmscond + nset*nbas*nbas*LL;
double *dmscondss = pdmscond + nset*nbas*nbas*SS;
double *dmscondsl = pdmscond + nset*nbas*nbas*SL;
//double *dmscondls = dmscond + nset*nbas*nbas*LS;
double complex *dmll = dm + n2c*n2c*LL*nset;
double complex *dmss = dm + n2c*n2c*SS*nset;
double complex *dmsl = dm + n2c*n2c*SL*nset;
//double complex *dmls = dm + n2c*n2c*LS*nset;
set_dmcond(dmcondll, dmscondll, dmll,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
set_dmcond(dmcondss, dmscondss, dmss,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
set_dmcond(dmcondsl, dmscondsl, dmsl,
opt->direct_scf_cutoff, nset, ao_loc, atm, natm, bas, nbas, env);
}
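/* Editorial layout sketch, inferred from the offsets above: opt->dm_cond is
 * organized as 4 nbas*nbas blocks of combined maxima in the order
 * (LL, SS, SL, LS), followed by nset per-matrix blocks for each component:
 *
 *   [ dmcondLL | dmcondSS | dmcondSL | dmcondLS
 *   | dmscondLL[0..nset) | dmscondSS[0..nset) | dmscondSL[0..nset) | (LS slot unused) ]
 */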
|
GB_binop__max_int8.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__max_int8)
// A.*B function (eWiseMult): GB (_AemultB_08__max_int8)
// A.*B function (eWiseMult): GB (_AemultB_02__max_int8)
// A.*B function (eWiseMult): GB (_AemultB_04__max_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__max_int8)
// A*D function (colscale): GB (_AxD__max_int8)
// D*A function (rowscale): GB (_DxB__max_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__max_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__max_int8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_int8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_int8)
// C=scalar+B GB (_bind1st__max_int8)
// C=scalar+B' GB (_bind1st_tran__max_int8)
// C=A+scalar GB (_bind2nd__max_int8)
// C=A'+scalar GB (_bind2nd_tran__max_int8)
// C type: int8_t
// A type: int8_t
// A pattern? 0
// B type: int8_t
// B pattern? 0
// BinaryOp: cij = GB_IMAX (aij, bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int8_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_IMAX (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MAX || GxB_NO_INT8 || GxB_NO_MAX_INT8)
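// (Editorial note: when any of these GxB_NO_* compile-time flags is set,
// every kernel in this file reduces to `return (GrB_NO_VALUE)`, signalling
// the caller to fall back to the generic, typecasting implementation.)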
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__max_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__max_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__max_int8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__max_int8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__max_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__max_int8)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *restrict Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__max_int8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
int8_t alpha_scalar ;
int8_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((int8_t *) alpha_scalar_in)) ;
beta_scalar = (*((int8_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__max_int8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__max_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__max_int8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__max_int8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__max_int8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_IMAX (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
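// Editorial reference sketch (plain C, ignoring the bitmap Bb and iso
// cases): the kernel above computes Cx [p] = GB_IMAX (x, Bx [p]) for every
// entry, i.e. an elementwise integer max with the scalar bound as 1st input:
static inline void bind1st_max_int8_ref (int8_t *Cx, int8_t x,
    const int8_t *Bx, int64_t n)
{
    int64_t p ;
    for (p = 0 ; p < n ; p++)
    {
        Cx [p] = (x > Bx [p]) ? x : Bx [p] ;    // GB_IMAX for int8_t
    }
}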
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__max_int8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_IMAX (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__max_int8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_IMAX (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__max_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
snmp_fmt_plug.c | /* Cracker for SNMPv3 USM hashes, https://tools.ietf.org/html/rfc3414.
*
* This software is Copyright (c) 2017, Dhiru Kholia <dhiru [at] openwall.com>,
* and it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Thanks to https://www.0x0ff.info/2013/snmpv3-authentification/ for the very
* clear explanation of the algorithms involved in SNMPv3 USM.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_snmp;
#elif FMT_REGISTERS_H
john_register_one(&fmt_snmp);
#else
#include <string.h>
#include <stdint.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 8
#endif
#endif
#include "formats.h"
#include "md5.h"
#include "hmacmd5.h"
#include "sha.h"
#include "hmac_sha.h"
#include "misc.h"
#include "common.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"
#define FORMAT_LABEL "SNMP"
#define FORMAT_NAME "SNMPv3 USM"
#define FORMAT_TAG "$SNMPv3$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "HMAC-MD5-96/HMAC-SHA1-96 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 0
#define BINARY_ALIGN 1
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN sizeof(uint32_t)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MAX_SALT_LEN 1500
static struct fmt_tests tests[] = {
// https://wiki.wireshark.org/SampleCaptures, snmp_usm.pcap, pippo, md5
{"$SNMPv3$1$3$3081b10201033011020430f6f3d5020300ffe304010702010304373035040d80001f888059dc486145a2632202010802020ab90405706970706f040c00000000000000000000000004080000000103d5321a0460826ecf6443956d4c364bfc6f6ffc8ee0df000ffd0955af12d2c0f3c60fadea417d2bb80c0b2c1fa7a46ce44f9f16e15ee830a49881f60ecfa757d2f04000eb39a94058121d88ca20eeef4e6bf06784c67c15f144915d9bc2c6a0461da92a4abe$80001f888059dc486145a26322$19395e67894fda182414849f", "pippoxxx"},
// https://wiki.wireshark.org/SampleCaptures, snmp_usm.pcap, pippo, same as above but with missing algorithm specifier (0 instead of 1)
{"$SNMPv3$0$3$3081b10201033011020430f6f3d5020300ffe304010702010304373035040d80001f888059dc486145a2632202010802020ab90405706970706f040c00000000000000000000000004080000000103d5321a0460826ecf6443956d4c364bfc6f6ffc8ee0df000ffd0955af12d2c0f3c60fadea417d2bb80c0b2c1fa7a46ce44f9f16e15ee830a49881f60ecfa757d2f04000eb39a94058121d88ca20eeef4e6bf06784c67c15f144915d9bc2c6a0461da92a4abe$80001f888059dc486145a26322$19395e67894fda182414849f", "pippoxxx"},
// https://wiki.wireshark.org/SampleCaptures, snmp_usm.pcap, pippo3, sha1
{"$SNMPv3$2$76$30820144020103301102043cdca370020300ffe304010302010304383036040d80001f888059dc486145a2632202010802020aba0406706970706f33040c0000000000000000000000000408f9a7cd5639adc7de0481f12d4e0febddef162199aa61bb97f44b84d975d9cef001d31eed660a193c22362c2ba6d203932822baa6c5d0032cc5cd7a8b7ac7b2fc005820ea72d72ffe59d3696be2bc8d5bdffb2de6fc775ed26cbf2d49a513704867665126775b8ffcaf3c07c19f9ecefb20293af7a6beecb6a5f2e3ba812ed9d71d21679007546f3acc6b72aff2baff2688451e74434dc9e6dab2f1b5e149691ced9fb4283fc8f85e3e7ebbe833353076fbdea7a11bc13a8c5ea62385b519e8bd2ab15f646572f487c8eb471eb0b069c5cc500eb8abc0227746d4ee8a5d9f0d6bfd9ece27f3f99ad5937c3e9be08e3074963796d3a13907fa1f17d213$80001f888059dc486145a26322$3de2a23a91ef278f8277b3f5", "pippoxxx"},
// https://www.0x0ff.info/2013/snmpv3-authentification/
{"$SNMPv3$1$0$30818002010330110204580b8cc7020300ffe30401050201030431302f041180001f888062dc7f4c15465c510000000002010302017c040475736572040c00000000000000000000000004003035041180001f888062dc7f4c15465c51000000000400a11e0204334304ff0201000201003010300e060a2b06010201041e0105010500$80001f888062dc7f4c15465c5100000000$9b1b71e33603a30c125f095d", "useruseruser"},
// UTF-8 password
{"$SNMPv3$1$4$3081a30201033011020416396d42020300ffe304010302010304393037041180001f88804883c95f7803fa580000000002010102016904046c756c75040c00000000000000000000000004080000000166c4ecb40450cee8d8c70a64bc0b508bb2a5625f9916a35a4c1f2d1a4d436c02312edad700a1a21bb23c319b073ed8b2a84d3829961e87af1a30daa443f7408dcc0dbee952b8fb0eab20760488908f31047b31caefba$80001f88804883c95f7803fa5800000000$9fa2a2e12cff0ca34794e988", "1234567£"},
// SNMPv3 over IPv6
{"$SNMPv3$0$4$3081a302010330110204551e91ab020300ffcf04010302010304393037041180001f88804883c95f7803fa580000000002010202015404046c756c75040c00000000000000000000000004080000000296c59db40450b0228ff64c7311310b1c41e63b999087495bb482700f40646ec63e461490ff985436cc8dfd63ed0bc1e66b307eab019bdb406e27df3c175eecbf82504639694efd38e4eff6bd91c524443a962fb331e8$80001f88804883c95f7803fa5800000000$af477d4cc2e0d31e9340acf9", "1234567£"},
{NULL}
};
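/*
 * Editorial summary of the hash layout, as parsed by valid() and get_salt()
 * below (wording not from the original author):
 *
 *   $SNMPv3$<alg>$<packet#>$<wholeMsg>$<engineID>$<authParams>
 *
 * <alg>        0 = unknown (try HMAC-MD5-96, then HMAC-SHA1-96), 1 = MD5, 2 = SHA1
 * <packet#>    decimal packet number, kept only for debugging
 * <wholeMsg>   hex, at most MAX_SALT_LEN (1500) bytes
 * <engineID>   hex msgAuthoritativeEngineID, at most 32 bytes
 * <authParams> hex msgAuthenticationParameters, exactly 12 bytes
 */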
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked, cracked_count;
static struct custom_salt {
uint32_t authProtocol;
unsigned char salt[MAX_SALT_LEN];
uint32_t salt_length;
unsigned char engineID[32]; // length must be between 5 and 32 bytes (inclusive)
uint32_t engineLength;
unsigned char msgAuthenticationParameters[12];
} *cur_salt;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
cracked_count = self->params.max_keys_per_crypt;
}
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
int value, extra;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += TAG_LENGTH;
if ((p = strtokm(ctcopy, "$")) == NULL) // algorithm
goto err;
if (!isdec(p))
goto err;
value = atoi(p);
if (value != 0 && value != 1 && value != 2)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) // packet number, for debugging
goto err;
if (!isdec(p))
goto err;
if ((p = strtokm(NULL, "$")) == NULL) // salt (wholeMsg)
goto err;
if (hexlenl(p, &extra) > MAX_SALT_LEN * 2 || extra)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) // msgAuthoritativeEngineID / snmpEngineID
goto err;
if (hexlenl(p, &extra) > 32 * 2 || extra)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) // msgAuthenticationParameters (hash)
goto err;
if (hexlenl(p, &extra) != 12 * 2 || extra)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
static struct custom_salt cs;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
int i;
memset(&cs, 0, SALT_SIZE);
ctcopy += TAG_LENGTH;
p = strtokm(ctcopy, "$");
cs.authProtocol = atoi(p);
p = strtokm(NULL, "$");
p = strtokm(NULL, "$");
cs.salt_length = strlen(p) / 2;
for (i = 0; i < cs.salt_length; i++)
cs.salt[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
p = strtokm(NULL, "$");
cs.engineLength = strlen(p) / 2;
for (i = 0; i < cs.engineLength; i++)
cs.engineID[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
p = strtokm(NULL, "$");
for (i = 0; i < 12; i++)
cs.msgAuthenticationParameters[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])];
MEM_FREE(keeptr);
return &cs;
}
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/* Password to Key Sample Code for MD5, from RFC 3414 A.2.1 and Wireshark */
static void snmp_usm_password_to_key_md5(const uint8_t *password, uint32_t
passwordlen, const uint8_t *engineID, uint32_t engineLength,
uint8_t *key)
{
uint8_t *cp, password_buf[64];
uint32_t password_index = 0;
uint32_t count = 0, i;
MD5_CTX ctx;
MD5_Init(&ctx);
/**********************************************/
/* Use while loop until we've done 1 Megabyte */
/**********************************************/
while (count < 1048576) {
cp = password_buf;
if (passwordlen != 0) {
for (i = 0; i < 64; i++) {
/*************************************************/
/* Take the next octet of the password, wrapping */
/* to the beginning of the password as necessary.*/
/*************************************************/
*cp++ = password[password_index++ % passwordlen];
}
} else {
*cp = 0;
}
MD5_Update(&ctx, password_buf, 64);
count += 64;
}
MD5_Final(key, &ctx);
/*****************************************************/
/* Now localize the key with the engineID and pass */
/* through MD5 to produce final key */
/* May want to ensure that engineLength <= 32, */
/* otherwise need to use a buffer larger than 64 */
/*****************************************************/
memcpy(password_buf, key, 16);
memcpy(password_buf+16, engineID, engineLength);
memcpy(password_buf+16+engineLength, key, 16);
MD5_Init(&ctx);
MD5_Update(&ctx, password_buf, 32+engineLength);
MD5_Final(key, &ctx);
}
/* Password to Key Sample Code for SHA, from RFC 3414 A.2.2 and Wireshark */
static void snmp_usm_password_to_key_sha(const uint8_t *password, uint32_t
passwordlen, const uint8_t *engineID, uint32_t engineLength,
uint8_t *key)
{
uint8_t *cp, password_buf[72];
uint32_t password_index = 0;
uint32_t count = 0, i;
SHA_CTX ctx;
SHA1_Init(&ctx);
/**********************************************/
/* Use while loop until we've done 1 Megabyte */
/**********************************************/
while (count < 1048576) {
cp = password_buf;
if (passwordlen != 0) {
for (i = 0; i < 64; i++) {
/*************************************************/
/* Take the next octet of the password, wrapping */
/* to the beginning of the password as necessary.*/
/*************************************************/
*cp++ = password[password_index++ % passwordlen];
}
} else {
*cp = 0;
}
SHA1_Update(&ctx, password_buf, 64);
count += 64;
}
SHA1_Final(key, &ctx);
/*****************************************************/
/* Now localize the key with the engineID and pass */
/* through SHA to produce final key */
/* May want to ensure that engineLength <= 32, */
/* otherwise need to use a buffer larger than 72 */
/*****************************************************/
memcpy(password_buf, key, 20);
memcpy(password_buf+20, engineID, engineLength);
memcpy(password_buf+20+engineLength, key, 20);
SHA1_Init(&ctx);
SHA1_Update(&ctx, password_buf, 40+engineLength);
SHA1_Final(key, &ctx);
}
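/* Editorial usage sketch: a hypothetical single-candidate check mirroring
 * what crypt_all() does below for the MD5 case: derive the localized key,
 * HMAC the captured wholeMsg, and compare the 96-bit truncation against the
 * packet's msgAuthenticationParameters. */
static int snmp_try_password_md5(struct custom_salt *cs, const char *pw)
{
	HMACMD5Context ctx;
	unsigned char authKey[16], out[16];
	snmp_usm_password_to_key_md5((const uint8_t *)pw, strlen(pw),
	                             cs->engineID, cs->engineLength, authKey);
	hmac_md5_init_rfc2104(authKey, 16, &ctx);
	hmac_md5_update(cs->salt, cs->salt_length, &ctx);
	hmac_md5_final(out, &ctx);
	return memcmp(out, cs->msgAuthenticationParameters, 12) == 0;
}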
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
memset(cracked, 0, sizeof(cracked[0])*cracked_count);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++)
{
HMACMD5Context ctx;
unsigned char authKey[20];
unsigned char out[20];
if (cur_salt->authProtocol == 1) {
snmp_usm_password_to_key_md5((const uint8_t *)saved_key[index],
strlen(saved_key[index]),
cur_salt->engineID,
cur_salt->engineLength, authKey);
hmac_md5_init_rfc2104(authKey, 16, &ctx);
hmac_md5_update(cur_salt->salt, cur_salt->salt_length, &ctx);
hmac_md5_final(out, &ctx);
if (memcmp(out, cur_salt->msgAuthenticationParameters, 12) == 0)
cracked[index] = 1;
else
cracked[index] = 0;
} else if (cur_salt->authProtocol == 2) {
snmp_usm_password_to_key_sha((const uint8_t *)saved_key[index],
strlen(saved_key[index]),
cur_salt->engineID,
cur_salt->engineLength, authKey);
hmac_sha1(authKey, 20, cur_salt->salt, cur_salt->salt_length, out, 12);
if (memcmp(out, cur_salt->msgAuthenticationParameters, 12) == 0)
cracked[index] = 1;
else
cracked[index] = 0;
} else if (cur_salt->authProtocol == 0) {
cracked[index] = 0;
snmp_usm_password_to_key_md5((const uint8_t *)saved_key[index],
strlen(saved_key[index]),
cur_salt->engineID,
cur_salt->engineLength, authKey);
hmac_md5_init_rfc2104(authKey, 16, &ctx);
hmac_md5_update(cur_salt->salt, cur_salt->salt_length, &ctx);
hmac_md5_final(out, &ctx);
if (memcmp(out, cur_salt->msgAuthenticationParameters, 12) == 0) {
cracked[index] = 1;
continue;
}
snmp_usm_password_to_key_sha((const uint8_t *)saved_key[index],
strlen(saved_key[index]),
cur_salt->engineID,
cur_salt->engineLength, authKey);
hmac_sha1(authKey, 20, cur_salt->salt, cur_salt->salt_length, out, 12);
if (memcmp(out, cur_salt->msgAuthenticationParameters, 12) == 0)
cracked[index] = 1;
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if (cracked[index])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void snmp_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_snmp = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
snmp_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
convolution_sgemm_pack1to4_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void im2col_sgemm_pack1to4_int8_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt)
{
#if NCNN_AVX512VNNI && __AVX512F__ && !__AVX512VNNI__
if (ncnn::cpu_support_x86_avx512_vnni())
{
extern void im2col_sgemm_pack1to4_int8_sse_avx512vnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_pack1to4_int8_sse_avx512vnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_AVXVNNI && __AVX2__ && !__AVXVNNI__
if (ncnn::cpu_support_x86_avx_vnni())
{
extern void im2col_sgemm_pack1to4_int8_sse_avxvnni(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_pack1to4_int8_sse_avxvnni(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
#if NCNN_XOP && __SSE2__ && !__XOP__
if (ncnn::cpu_support_x86_xop())
{
extern void im2col_sgemm_pack1to4_int8_sse_xop(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Option& opt);
im2col_sgemm_pack1to4_int8_sse_xop(bottom_im2col, top_blob, kernel, opt);
return;
}
#endif
// Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
const int size = bottom_im2col.w;
const int maxk = bottom_im2col.h;
const int inch = bottom_im2col.c;
const int outch = top_blob.c;
// permute
Mat tmp;
if (inch >= 4)
{
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch / 4 + inch % 4, size / 4 + (size % 4) / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch / 4 + inch % 4, size / 2 + size % 2, 4u, 4, opt.workspace_allocator);
else
tmp.create(maxk, inch / 4 + inch % 4, size, 4u, 4, opt.workspace_allocator);
#endif
}
else
{
#if __AVX2__
if (size >= 4)
tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
#else
if (size >= 2)
tmp.create(2 * maxk, inch, size / 2 + size % 2, 1u, 1, opt.workspace_allocator);
else
tmp.create(maxk, inch, size, 1u, 1, opt.workspace_allocator);
#endif
}
{
#if __AVX2__
int remain_size_start = 0;
int nn_size = size >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
signed char* tmpptr = tmp.channel(i / 4);
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr[8] = img0[2];
tmpptr[9] = img1[2];
tmpptr[10] = img2[2];
tmpptr[11] = img3[2];
tmpptr[12] = img0[3];
tmpptr[13] = img1[3];
tmpptr[14] = img2[3];
tmpptr[15] = img3[3];
tmpptr += 16;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += size;
}
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#else
int remain_size_start = 0;
int nn_size = (size - remain_size_start) >> 1;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
#if __AVX2__
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
signed char* tmpptr = tmp.channel(i / 2);
#endif
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr[4] = img0[1];
tmpptr[5] = img1[1];
tmpptr[6] = img2[1];
tmpptr[7] = img3[1];
tmpptr += 8;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr += 2;
img0 += size;
}
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
#if __AVX2__
signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
int q = 0;
for (; q + 3 < inch; q += 4)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
const signed char* img1 = (const signed char*)bottom_im2col.channel(q + 1) + i;
const signed char* img2 = (const signed char*)bottom_im2col.channel(q + 2) + i;
const signed char* img3 = (const signed char*)bottom_im2col.channel(q + 3) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img1[0];
tmpptr[2] = img2[0];
tmpptr[3] = img3[0];
tmpptr += 4;
img0 += size;
img1 += size;
img2 += size;
img3 += size;
}
}
for (; q < inch; q++)
{
const signed char* img0 = (const signed char*)bottom_im2col.channel(q) + i;
for (int k = 0; k < maxk; k++)
{
tmpptr[0] = img0[0];
tmpptr += 1;
img0 += size;
}
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
int* outptr0 = top_blob.channel(p);
int i = 0;
#if __AVX2__
for (; i + 3 < size; i += 4)
{
const signed char* tmpptr = tmp.channel(i / 4);
const signed char* kptr0 = kernel.channel(p / 4);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
__m256i _sum00_12 = _mm256_setzero_si256();
__m256i _sum20_32 = _mm256_setzero_si256();
if (nn4 > 0)
{
#if __AVXVNNI__ || __AVX512VNNI__
__m256i _sum10_02 = _mm256_setzero_si256();
__m256i _sum30_22 = _mm256_setzero_si256();
#else
__m256i _sum10_02 = _mm256_setzero_si256();
__m256i _sum01_13 = _mm256_setzero_si256();
__m256i _sum11_03 = _mm256_setzero_si256();
__m256i _sum30_22 = _mm256_setzero_si256();
__m256i _sum21_33 = _mm256_setzero_si256();
__m256i _sum31_23 = _mm256_setzero_si256();
#endif
int j = 0;
for (; j < nn4; j++)
{
__m128i _val0123 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val0123_16 = _mm256_cvtepi8_epi16(_val0123);
__m256i _val01_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(1, 1, 0, 0));
__m256i _val23_16 = _mm256_permute4x64_epi64(_val0123_16, _MM_SHUFFLE(3, 3, 2, 2));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
__m256i _val32_16 = _mm256_permute4x64_epi64(_val23_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16);
_sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16);
_sum20_32 = _mm256_dpwssd_epi32(_sum20_32, _val23_16, _w01_16);
_sum30_22 = _mm256_dpwssd_epi32(_sum30_22, _val32_16, _w01_16);
#else
__m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
__m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
__m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
__m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
__m256i _sl20_31 = _mm256_mullo_epi16(_val23_16, _w01_16);
__m256i _sh20_31 = _mm256_mulhi_epi16(_val23_16, _w01_16);
__m256i _sl30_21 = _mm256_mullo_epi16(_val32_16, _w01_16);
__m256i _sh30_21 = _mm256_mulhi_epi16(_val32_16, _w01_16);
_sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
_sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
_sum01_13 = _mm256_add_epi32(_sum01_13, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
_sum11_03 = _mm256_add_epi32(_sum11_03, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
_sum20_32 = _mm256_add_epi32(_sum20_32, _mm256_unpacklo_epi16(_sl20_31, _sh20_31));
_sum30_22 = _mm256_add_epi32(_sum30_22, _mm256_unpacklo_epi16(_sl30_21, _sh30_21));
_sum21_33 = _mm256_add_epi32(_sum21_33, _mm256_unpackhi_epi16(_sl20_31, _sh20_31));
_sum31_23 = _mm256_add_epi32(_sum31_23, _mm256_unpackhi_epi16(_sl30_21, _sh30_21));
#endif
tmpptr += 16;
kptr0 += 16;
}
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02);
_sum20_32 = _mm256_hadd_epi32(_sum20_32, _sum30_22);
__m256i _perm_mask = _mm256_set_epi32(5, 1, 6, 2, 7, 3, 4, 0);
_sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask);
_sum20_32 = _mm256_permutevar8x32_epi32(_sum20_32, _perm_mask);
#else
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_12, _sum10_02);
_tmp1 = _mm256_unpacklo_epi32(_sum01_13, _sum11_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_12, _sum10_02);
_tmp3 = _mm256_unpackhi_epi32(_sum01_13, _sum11_03);
_sum00_12 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_02 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum01_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum11_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum20_32, _sum30_22);
_tmp1 = _mm256_unpacklo_epi32(_sum21_33, _sum31_23);
_tmp2 = _mm256_unpackhi_epi32(_sum20_32, _sum30_22);
_tmp3 = _mm256_unpackhi_epi32(_sum21_33, _sum31_23);
_sum20_32 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum30_22 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum21_33 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum31_23 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00_12 = _mm256_add_epi32(_sum00_12, _sum10_02);
_sum01_13 = _mm256_add_epi32(_sum01_13, _sum11_03);
_sum00_12 = _mm256_add_epi32(_sum00_12, _sum01_13);
_sum20_32 = _mm256_add_epi32(_sum20_32, _sum30_22);
_sum21_33 = _mm256_add_epi32(_sum21_33, _sum31_23);
_sum20_32 = _mm256_add_epi32(_sum20_32, _sum21_33);
__m256i _perm_mask = _mm256_set_epi32(6, 4, 3, 1, 7, 5, 2, 0);
_sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask);
_sum20_32 = _mm256_permutevar8x32_epi32(_sum20_32, _perm_mask);
#endif
}
__m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0);
__m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1);
__m128i _sum20 = _mm256_extracti128_si256(_sum20_32, 0);
__m128i _sum30 = _mm256_extracti128_si256(_sum20_32, 1);
int j = 0;
for (; j < nn1; j++)
{
__m128i _val01 = _mm_set_epi16(tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[0], tmpptr[0], tmpptr[0], tmpptr[0]);
__m128i _val23 = _mm_set_epi16(tmpptr[3], tmpptr[3], tmpptr[3], tmpptr[3], tmpptr[2], tmpptr[2], tmpptr[2], tmpptr[2]);
__m128i _w0123 = _mm_set_epi16(kptr0[3], kptr0[2], kptr0[1], kptr0[0], kptr0[3], kptr0[2], kptr0[1], kptr0[0]);
__m128i _sl00 = _mm_mullo_epi16(_val01, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val01, _w0123);
__m128i _sl10 = _mm_mullo_epi16(_val23, _w0123);
__m128i _sh10 = _mm_mulhi_epi16(_val23, _w0123);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00));
_sum20 = _mm_add_epi32(_sum20, _mm_unpacklo_epi16(_sl10, _sh10));
_sum30 = _mm_add_epi32(_sum30, _mm_unpackhi_epi16(_sl10, _sh10));
tmpptr += 4;
kptr0 += 4;
}
_mm_storeu_si128((__m128i*)outptr0, _sum00);
_mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10);
_mm_storeu_si128((__m128i*)(outptr0 + 8), _sum20);
_mm_storeu_si128((__m128i*)(outptr0 + 12), _sum30);
outptr0 += 16;
}
#endif
for (; i + 1 < size; i += 2)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2);
#else
const signed char* tmpptr = tmp.channel(i / 2);
#endif
const signed char* kptr0 = kernel.channel(p);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
#if __AVX2__
__m256i _sum00_12 = _mm256_setzero_si256();
#else
__m128i _sum00 = _mm_setzero_si128();
__m128i _sum10 = _mm_setzero_si128();
#endif
if (nn4 > 0)
{
#if __AVX2__
#if __AVXVNNI__ || __AVX512VNNI__
__m256i _sum10_02 = _mm256_setzero_si256();
#else
__m256i _sum10_02 = _mm256_setzero_si256();
__m256i _sum01_13 = _mm256_setzero_si256();
__m256i _sum11_03 = _mm256_setzero_si256();
#endif
#else
#if __XOP__
__m128i _sum01 = _mm_setzero_si128();
__m128i _sum11 = _mm_setzero_si128();
#else
__m128i _sum01 = _mm_setzero_si128();
__m128i _sum02 = _mm_setzero_si128();
__m128i _sum03 = _mm_setzero_si128();
__m128i _sum11 = _mm_setzero_si128();
__m128i _sum12 = _mm_setzero_si128();
__m128i _sum13 = _mm_setzero_si128();
#endif
#endif
int j = 0;
for (; j < nn4; j++)
{
#if __AVX2__
__m128i _val01 = _mm_loadu_si128((const __m128i*)tmpptr);
__m256i _val01_16 = _mm256_cvtepi8_epi16(_val01);
_val01_16 = _mm256_permute4x64_epi64(_val01_16, _MM_SHUFFLE(1, 1, 0, 0));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m256i _w01_16 = _mm256_cvtepi8_epi16(_w01);
__m256i _val10_16 = _mm256_permute4x64_epi64(_val01_16, 78);
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_dpwssd_epi32(_sum00_12, _val01_16, _w01_16);
_sum10_02 = _mm256_dpwssd_epi32(_sum10_02, _val10_16, _w01_16);
#else
__m256i _sl00_11 = _mm256_mullo_epi16(_val01_16, _w01_16);
__m256i _sh00_11 = _mm256_mulhi_epi16(_val01_16, _w01_16);
__m256i _sl10_01 = _mm256_mullo_epi16(_val10_16, _w01_16);
__m256i _sh10_01 = _mm256_mulhi_epi16(_val10_16, _w01_16);
_sum00_12 = _mm256_add_epi32(_sum00_12, _mm256_unpacklo_epi16(_sl00_11, _sh00_11));
_sum10_02 = _mm256_add_epi32(_sum10_02, _mm256_unpacklo_epi16(_sl10_01, _sh10_01));
_sum01_13 = _mm256_add_epi32(_sum01_13, _mm256_unpackhi_epi16(_sl00_11, _sh00_11));
_sum11_03 = _mm256_add_epi32(_sum11_03, _mm256_unpackhi_epi16(_sl10_01, _sh10_01));
#endif
#else
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
_val01 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
_val01 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
__m128i _val0 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _val1 = _mm_shuffle_epi32(_val01, _MM_SHUFFLE(3, 2, 3, 2));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
#if __XOP__
_sum00 = _mm_maddd_epi16(_val0, _w0, _sum00);
_sum01 = _mm_maddd_epi16(_val0, _w1, _sum01);
_sum10 = _mm_maddd_epi16(_val1, _w0, _sum10);
_sum11 = _mm_maddd_epi16(_val1, _w1, _sum11);
#else
__m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
__m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
__m128i _sl10 = _mm_mullo_epi16(_val1, _w0);
__m128i _sh10 = _mm_mulhi_epi16(_val1, _w0);
__m128i _sl11 = _mm_mullo_epi16(_val1, _w1);
__m128i _sh11 = _mm_mulhi_epi16(_val1, _w1);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum01 = _mm_add_epi32(_sum01, _mm_unpackhi_epi16(_sl00, _sh00));
_sum02 = _mm_add_epi32(_sum02, _mm_unpacklo_epi16(_sl01, _sh01));
_sum03 = _mm_add_epi32(_sum03, _mm_unpackhi_epi16(_sl01, _sh01));
_sum10 = _mm_add_epi32(_sum10, _mm_unpacklo_epi16(_sl10, _sh10));
_sum11 = _mm_add_epi32(_sum11, _mm_unpackhi_epi16(_sl10, _sh10));
_sum12 = _mm_add_epi32(_sum12, _mm_unpacklo_epi16(_sl11, _sh11));
_sum13 = _mm_add_epi32(_sum13, _mm_unpackhi_epi16(_sl11, _sh11));
#endif
#endif
tmpptr += 8;
kptr0 += 16;
}
#if __AVX2__
#if __AVXVNNI__ || __AVX512VNNI__
_sum00_12 = _mm256_hadd_epi32(_sum00_12, _sum10_02);
__m256i _perm_mask = _mm256_set_epi32(5, 1, 6, 2, 7, 3, 4, 0);
_sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask);
#else
// transpose 4x8
{
__m256i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm256_unpacklo_epi32(_sum00_12, _sum10_02);
_tmp1 = _mm256_unpacklo_epi32(_sum01_13, _sum11_03);
_tmp2 = _mm256_unpackhi_epi32(_sum00_12, _sum10_02);
_tmp3 = _mm256_unpackhi_epi32(_sum01_13, _sum11_03);
_sum00_12 = _mm256_unpacklo_epi64(_tmp0, _tmp1);
_sum10_02 = _mm256_unpackhi_epi64(_tmp0, _tmp1);
_sum01_13 = _mm256_unpacklo_epi64(_tmp2, _tmp3);
_sum11_03 = _mm256_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00_12 = _mm256_add_epi32(_sum00_12, _sum10_02);
_sum01_13 = _mm256_add_epi32(_sum01_13, _sum11_03);
_sum00_12 = _mm256_add_epi32(_sum00_12, _sum01_13);
__m256i _perm_mask = _mm256_set_epi32(6, 4, 3, 1, 7, 5, 2, 0);
_sum00_12 = _mm256_permutevar8x32_epi32(_sum00_12, _perm_mask);
#endif
#else
#if __XOP__
_sum00 = _mm_hadd_epi32(_sum00, _sum01);
_sum10 = _mm_hadd_epi32(_sum10, _sum11);
#else
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum00, _sum01);
_tmp1 = _mm_unpacklo_epi32(_sum02, _sum03);
_tmp2 = _mm_unpackhi_epi32(_sum00, _sum01);
_tmp3 = _mm_unpackhi_epi32(_sum02, _sum03);
_sum00 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum01 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum02 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum03 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum10, _sum11);
_tmp1 = _mm_unpacklo_epi32(_sum12, _sum13);
_tmp2 = _mm_unpackhi_epi32(_sum10, _sum11);
_tmp3 = _mm_unpackhi_epi32(_sum12, _sum13);
_sum10 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum11 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum12 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum13 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum00 = _mm_add_epi32(_sum00, _sum01);
_sum02 = _mm_add_epi32(_sum02, _sum03);
_sum10 = _mm_add_epi32(_sum10, _sum11);
_sum12 = _mm_add_epi32(_sum12, _sum13);
_sum00 = _mm_add_epi32(_sum00, _sum02);
_sum10 = _mm_add_epi32(_sum10, _sum12);
#endif
#endif
}
#if __AVX2__
__m128i _sum00 = _mm256_extracti128_si256(_sum00_12, 0);
__m128i _sum10 = _mm256_extracti128_si256(_sum00_12, 1);
#endif
int j = 0;
for (; j < nn1; j++)
{
__m128i _val = _mm_set_epi16(tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[1], tmpptr[0], tmpptr[0], tmpptr[0], tmpptr[0]);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
_w0123 = _mm_shuffle_epi32(_w0123, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _sl00 = _mm_mullo_epi16(_val, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val, _w0123);
_sum00 = _mm_add_epi32(_sum00, _mm_unpacklo_epi16(_sl00, _sh00));
_sum10 = _mm_add_epi32(_sum10, _mm_unpackhi_epi16(_sl00, _sh00));
tmpptr += 2;
kptr0 += 4;
}
_mm_storeu_si128((__m128i*)outptr0, _sum00);
_mm_storeu_si128((__m128i*)(outptr0 + 4), _sum10);
outptr0 += 8;
}
for (; i < size; i++)
{
#if __AVX2__
const signed char* tmpptr = tmp.channel(i / 4 + (i % 4) / 2 + i % 2);
#else
const signed char* tmpptr = tmp.channel(i / 2 + i % 2);
#endif
const signed char* kptr0 = kernel.channel(p);
int nn4 = (inch / 4) * maxk;
int nn1 = (inch % 4) * maxk;
__m128i _sum0 = _mm_setzero_si128();
if (nn4 > 0)
{
__m128i _sum1 = _mm_setzero_si128();
__m128i _sum2 = _mm_setzero_si128();
__m128i _sum3 = _mm_setzero_si128();
int j = 0;
for (; j < nn4; j++)
{
__m128i _val01 = _mm_loadl_epi64((const __m128i*)tmpptr);
#if __SSE4_1__
__m128i _val0 = _mm_cvtepi8_epi16(_val01);
#else
__m128i _extval01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _val01);
__m128i _val0 = _mm_unpacklo_epi8(_val01, _extval01);
#endif
_val0 = _mm_shuffle_epi32(_val0, _MM_SHUFFLE(1, 0, 1, 0));
__m128i _w01 = _mm_loadu_si128((const __m128i*)kptr0);
__m128i _extw01 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w01);
__m128i _w0 = _mm_unpacklo_epi8(_w01, _extw01);
__m128i _w1 = _mm_unpackhi_epi8(_w01, _extw01);
__m128i _sl00 = _mm_mullo_epi16(_val0, _w0);
__m128i _sh00 = _mm_mulhi_epi16(_val0, _w0);
__m128i _sl01 = _mm_mullo_epi16(_val0, _w1);
__m128i _sh01 = _mm_mulhi_epi16(_val0, _w1);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
_sum1 = _mm_add_epi32(_sum1, _mm_unpackhi_epi16(_sl00, _sh00));
_sum2 = _mm_add_epi32(_sum2, _mm_unpacklo_epi16(_sl01, _sh01));
_sum3 = _mm_add_epi32(_sum3, _mm_unpackhi_epi16(_sl01, _sh01));
tmpptr += 4;
kptr0 += 16;
}
// transpose 4x4
{
__m128i _tmp0, _tmp1, _tmp2, _tmp3;
_tmp0 = _mm_unpacklo_epi32(_sum0, _sum1);
_tmp1 = _mm_unpacklo_epi32(_sum2, _sum3);
_tmp2 = _mm_unpackhi_epi32(_sum0, _sum1);
_tmp3 = _mm_unpackhi_epi32(_sum2, _sum3);
_sum0 = _mm_unpacklo_epi64(_tmp0, _tmp1);
_sum1 = _mm_unpackhi_epi64(_tmp0, _tmp1);
_sum2 = _mm_unpacklo_epi64(_tmp2, _tmp3);
_sum3 = _mm_unpackhi_epi64(_tmp2, _tmp3);
}
_sum0 = _mm_add_epi32(_sum0, _sum1);
_sum2 = _mm_add_epi32(_sum2, _sum3);
_sum0 = _mm_add_epi32(_sum0, _sum2);
}
int j = 0;
for (; j < nn1; j++)
{
__m128i _val = _mm_set1_epi16(tmpptr[0]);
__m128i _w0123 = _mm_loadl_epi64((const __m128i*)kptr0);
#if __SSE4_1__
_w0123 = _mm_cvtepi8_epi16(_w0123);
#else
__m128i _extw0123 = _mm_cmpgt_epi8(_mm_setzero_si128(), _w0123);
_w0123 = _mm_unpacklo_epi8(_w0123, _extw0123);
#endif
__m128i _sl00 = _mm_mullo_epi16(_val, _w0123);
__m128i _sh00 = _mm_mulhi_epi16(_val, _w0123);
_sum0 = _mm_add_epi32(_sum0, _mm_unpacklo_epi16(_sl00, _sh00));
tmpptr += 1;
kptr0 += 4;
}
_mm_storeu_si128((__m128i*)outptr0, _sum0);
outptr0 += 4;
}
}
}
static void convolution_im2col_sgemm_transform_kernel_pack1to4_int8_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h)
{
const int maxk = kernel_w * kernel_h;
// interleave
// src = maxk-inch-outch
// dst = 4a-4b-maxk-inch/4a-outch/4b
Mat kernel = _kernel.reshape(maxk, inch, outch);
if (inch >= 4)
kernel_tm.create(16 * maxk, inch / 4 + inch % 4, outch / 4, (size_t)1u);
else
kernel_tm.create(4 * maxk, inch, outch / 4, (size_t)1u);
for (int q = 0; q + 3 < outch; q += 4)
{
signed char* g00 = kernel_tm.channel(q / 4);
int p = 0;
for (; p + 3 < inch; p += 4)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 4; j++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p + j);
g00[0] = k00[k];
g00++;
}
}
}
}
for (; p < inch; p++)
{
for (int k = 0; k < maxk; k++)
{
for (int i = 0; i < 4; i++)
{
const signed char* k00 = kernel.channel(q + i).row<const signed char>(p);
g00[0] = k00[k];
g00++;
}
}
}
}
}
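// Editorial index sketch (assumes inch and outch are both multiples of 4):
// with the interleave above, weight (out channel q, in channel p, spatial
// position k) of the original maxk-inch-outch kernel lives in
// kernel_tm.channel(q / 4) at the offset computed here.
static inline int pack1to4_weight_offset(int maxk, int p, int q, int k)
{
    return ((p / 4) * maxk + k) * 16 + (q % 4) * 4 + (p % 4);
}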
static void convolution_im2col_sgemm_pack1to4_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
const int size = outw * outh;
const int maxk = kernel_w * kernel_h;
// im2col
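// each input channel is unrolled into maxk rows of length outw*outh, so the
// convolution below reduces to a plain matrix multiply (sgemm)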
Mat bottom_im2col(size, maxk, inch, 1u, 1, opt.workspace_allocator);
{
const int gap = w * stride_h - outw * stride_w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < inch; p++)
{
const Mat img = bottom_blob.channel(p);
signed char* ptr = bottom_im2col.channel(p);
for (int u = 0; u < kernel_h; u++)
{
for (int v = 0; v < kernel_w; v++)
{
const signed char* sptr = img.row<const signed char>(dilation_h * u) + dilation_w * v;
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
ptr[0] = sptr[0];
ptr[1] = sptr[stride_w];
ptr[2] = sptr[stride_w * 2];
ptr[3] = sptr[stride_w * 3];
sptr += stride_w * 4;
ptr += 4;
}
for (; j + 1 < outw; j += 2)
{
ptr[0] = sptr[0];
ptr[1] = sptr[stride_w];
sptr += stride_w * 2;
ptr += 2;
}
for (; j < outw; j++)
{
ptr[0] = sptr[0];
sptr += stride_w;
ptr += 1;
}
sptr += gap;
}
}
}
}
}
im2col_sgemm_pack1to4_int8_sse(bottom_im2col, top_blob, kernel, opt);
}
|
fac_setup2.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_sstruct_ls.h"
#include "fac.h"
/*--------------------------------------------------------------------------
* hypre_FacSetup2: Constructs the level composite structures.
* Each consists only of two levels, the refinement patches and the
* coarse parent base grids.
*--------------------------------------------------------------------------*/
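/*--------------------------------------------------------------------------
 * Sketch inferred from the setup code below (not part of the original
 * description): with AMR parts ordered by refinement level,
 *    part_crse = pgrid of level-1 (the coarse parent),
 *    part_fine = pgrid of level (the refinement patch),
 * so each grid_level[], graph_level[], and A_level[] holds two parts.
 *--------------------------------------------------------------------------*/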
HYPRE_Int
hypre_FacSetup2( void *fac_vdata,
hypre_SStructMatrix *A_in,
hypre_SStructVector *b,
hypre_SStructVector *x )
{
hypre_FACData *fac_data = (hypre_FACData*)fac_vdata;
HYPRE_Int *plevels = (fac_data-> plevels);
hypre_Index *rfactors = (fac_data-> prefinements);
MPI_Comm comm;
HYPRE_Int ndim;
HYPRE_Int npart;
HYPRE_Int nparts_level = 2;
HYPRE_Int part_crse = 0;
HYPRE_Int part_fine = 1;
hypre_SStructPMatrix *A_pmatrix;
hypre_StructMatrix *A_smatrix;
hypre_Box *A_smatrix_dbox;
hypre_SStructGrid **grid_level;
hypre_SStructGraph **graph_level;
HYPRE_Int part, level;
HYPRE_Int nvars;
hypre_SStructGraph *graph;
hypre_SStructGrid *grid;
hypre_SStructPGrid *pgrid;
hypre_StructGrid *sgrid;
hypre_BoxArray *sgrid_boxes;
hypre_Box *sgrid_box;
hypre_SStructStencil *stencils;
hypre_BoxArray *iboxarray;
hypre_Index *refine_factors;
hypre_IndexRef box_start;
hypre_IndexRef box_end;
hypre_SStructUVEntry **Uventries;
HYPRE_Int nUventries;
HYPRE_Int *iUventries;
hypre_SStructUVEntry *Uventry;
hypre_SStructUEntry *Uentry;
hypre_Index index, to_index, stride;
HYPRE_Int var, to_var, to_part, level_part, level_topart;
HYPRE_Int var1, var2;
HYPRE_Int i, j, k, to_rank, row_coord, nUentries;
hypre_BoxManEntry *boxman_entry;
hypre_SStructMatrix *A_rap;
hypre_SStructMatrix **A_level;
hypre_SStructVector **b_level;
hypre_SStructVector **x_level;
hypre_SStructVector **r_level;
hypre_SStructVector **e_level;
hypre_SStructPVector **tx_level;
hypre_SStructVector *tx;
void **matvec_data_level;
void **pmatvec_data_level;
void *matvec_data;
void **relax_data_level;
void **interp_data_level;
void **restrict_data_level;
/* coarsest grid solver */
HYPRE_Int csolver_type =(fac_data-> csolver_type);
HYPRE_SStructSolver crse_solver=NULL;
HYPRE_SStructSolver crse_precond=NULL;
HYPRE_Int max_level = hypre_FACDataMaxLevels(fac_data);
HYPRE_Int relax_type = fac_data -> relax_type;
HYPRE_Int usr_jacobi_weight= fac_data -> usr_jacobi_weight;
HYPRE_Real jacobi_weight = fac_data -> jacobi_weight;
HYPRE_Int *levels;
HYPRE_Int *part_to_level;
HYPRE_Int box, box_volume;
HYPRE_Int max_box_volume;
HYPRE_Int stencil_size;
hypre_Index stencil_shape_i, loop_size;
HYPRE_Int *stencil_vars;
HYPRE_Real *values;
HYPRE_Real *A_smatrix_value;
HYPRE_Int iA;
HYPRE_Int *nrows;
HYPRE_Int **ncols;
HYPRE_Int **rows;
HYPRE_Int **cols;
HYPRE_Int *cnt;
HYPRE_Real *vals;
HYPRE_Int *level_rows;
HYPRE_Int *level_cols;
HYPRE_Int level_cnt;
HYPRE_IJMatrix ij_A;
HYPRE_Int matrix_type;
HYPRE_Int max_cycles;
HYPRE_Int ierr = 0;
/*hypre_SStructMatrix *nested_A;
nested_A= hypre_TAlloc(hypre_SStructMatrix , 1);
nested_A= hypre_CoarsenAMROp(fac_vdata, A);*/
/* generate the composite operator with the computed coarse-grid operators */
hypre_AMR_RAP(A_in, rfactors, &A_rap);
(fac_data -> A_rap)= A_rap;
comm = hypre_SStructMatrixComm(A_rap);
ndim = hypre_SStructMatrixNDim(A_rap);
npart= hypre_SStructMatrixNParts(A_rap);
graph= hypre_SStructMatrixGraph(A_rap);
grid = hypre_SStructGraphGrid(graph);
ij_A = hypre_SStructMatrixIJMatrix(A_rap);
matrix_type= hypre_SStructMatrixObjectType(A_rap);
/*--------------------------------------------------------------------------
* logging arrays.
*--------------------------------------------------------------------------*/
if ((fac_data -> logging) > 0)
{
max_cycles = (fac_data -> max_cycles);
(fac_data -> norms) = hypre_TAlloc(HYPRE_Real, max_cycles);
(fac_data -> rel_norms)= hypre_TAlloc(HYPRE_Real, max_cycles);
}
/*--------------------------------------------------------------------------
* Extract the amr/sstruct level/part structure and refinement factors.
*--------------------------------------------------------------------------*/
levels = hypre_CTAlloc(HYPRE_Int, npart);
part_to_level = hypre_CTAlloc(HYPRE_Int, npart);
refine_factors= hypre_CTAlloc(hypre_Index, npart);
for (part= 0; part< npart; part++)
{
part_to_level[part] = plevels[part];
levels[plevels[part]]= part;
for (i= 0; i< ndim; i++)
{
refine_factors[plevels[part]][i]= rfactors[part][i];
}
for (i= ndim; i< 3; i++)
{
refine_factors[plevels[part]][i]= 1;
}
}
(fac_data -> level_to_part) = levels;
(fac_data -> part_to_level) = part_to_level;
(fac_data -> refine_factors)= refine_factors;
/*--------------------------------------------------------------------------
* Create the level SStructGrids using the original composite grid.
*--------------------------------------------------------------------------*/
grid_level= hypre_TAlloc(hypre_SStructGrid *, max_level+1);
for (level= max_level; level >= 0; level--)
{
HYPRE_SStructGridCreate(comm, ndim, nparts_level, &grid_level[level]);
}
for (level= max_level; level >= 0; level--)
{
/*--------------------------------------------------------------------------
* Create the fine part of the finest level SStructGrids using the original
* composite grid.
*--------------------------------------------------------------------------*/
if (level == max_level)
{
pgrid = hypre_SStructGridPGrid(grid, levels[level]);
iboxarray= hypre_SStructPGridCellIBoxArray(pgrid);
for (box = 0; box < hypre_BoxArraySize(iboxarray); box++)
{
HYPRE_SStructGridSetExtents(grid_level[level], part_fine,
hypre_BoxIMin( hypre_BoxArrayBox(iboxarray,box) ),
hypre_BoxIMax( hypre_BoxArrayBox(iboxarray,box) ));
}
HYPRE_SStructGridSetVariables( grid_level[level], part_fine,
hypre_SStructPGridNVars(pgrid),
hypre_SStructPGridVarTypes(pgrid) );
/*-----------------------------------------------------------------------
* Create the coarsest level grid if A has only 1 level
*-----------------------------------------------------------------------*/
if (level == 0)
{
for (box = 0; box < hypre_BoxArraySize(iboxarray); box++)
{
HYPRE_SStructGridSetExtents(grid_level[level], part_crse,
hypre_BoxIMin( hypre_BoxArrayBox(iboxarray,box) ),
hypre_BoxIMax( hypre_BoxArrayBox(iboxarray,box) ));
}
HYPRE_SStructGridSetVariables( grid_level[level], part_crse,
hypre_SStructPGridNVars(pgrid),
hypre_SStructPGridVarTypes(pgrid) );
}
}
/*--------------------------------------------------------------------------
* Create the coarse part of level SStructGrids using the original composite
* grid, the coarsest part SStructGrid, and the fine part if level < max_level.
*--------------------------------------------------------------------------*/
if (level > 0)
{
pgrid = hypre_SStructGridPGrid(grid, levels[level-1]);
iboxarray= hypre_SStructPGridCellIBoxArray(pgrid);
for (box = 0; box < hypre_BoxArraySize(iboxarray); box++)
{
HYPRE_SStructGridSetExtents(grid_level[level], part_crse,
hypre_BoxIMin( hypre_BoxArrayBox(iboxarray,box) ),
hypre_BoxIMax( hypre_BoxArrayBox(iboxarray,box) ));
HYPRE_SStructGridSetExtents(grid_level[level-1], part_fine,
hypre_BoxIMin( hypre_BoxArrayBox(iboxarray,box) ),
hypre_BoxIMax( hypre_BoxArrayBox(iboxarray,box) ));
if (level == 1)
{
HYPRE_SStructGridSetExtents(grid_level[level-1], part_crse,
hypre_BoxIMin( hypre_BoxArrayBox(iboxarray,box) ),
hypre_BoxIMax( hypre_BoxArrayBox(iboxarray,box) ));
}
}
HYPRE_SStructGridSetVariables( grid_level[level], part_crse,
hypre_SStructPGridNVars(pgrid),
hypre_SStructPGridVarTypes(pgrid) );
HYPRE_SStructGridSetVariables( grid_level[level-1], part_fine,
hypre_SStructPGridNVars(pgrid),
hypre_SStructPGridVarTypes(pgrid) );
/* coarsest SStructGrid */
if (level == 1)
{
HYPRE_SStructGridSetVariables( grid_level[level-1], part_crse,
hypre_SStructPGridNVars(pgrid),
hypre_SStructPGridVarTypes(pgrid) );
}
}
HYPRE_SStructGridAssemble(grid_level[level]);
}
(fac_data -> grid_level)= grid_level;
/*-----------------------------------------------------------
* Set up the graph. Create only the structured components
* first.
*-----------------------------------------------------------*/
graph_level= hypre_TAlloc(hypre_SStructGraph *, max_level+1);
for (level= max_level; level >= 0; level--)
{
HYPRE_SStructGraphCreate(comm, grid_level[level], &graph_level[level]);
}
for (level= max_level; level >= 0; level--)
{
/*-----------------------------------------------------------------------
* Create the fine part of the finest level structured graph connection.
*-----------------------------------------------------------------------*/
if (level == max_level)
{
pgrid = hypre_SStructGridPGrid(grid, levels[level]);
nvars = hypre_SStructPGridNVars(pgrid);
for (var1 = 0; var1 < nvars; var1++)
{
stencils= hypre_SStructGraphStencil(graph, levels[level], var1);
HYPRE_SStructGraphSetStencil(graph_level[level], part_fine, var1, stencils);
if (level == 0)
{
HYPRE_SStructGraphSetStencil(graph_level[level], part_crse, var1, stencils);
}
}
}
/*--------------------------------------------------------------------------
* Create the coarse part of the graph_level using the graph of A, and the
* fine part if level < max_level.
*--------------------------------------------------------------------------*/
if (level > 0)
{
pgrid = hypre_SStructGridPGrid(grid, levels[level-1]);
nvars = hypre_SStructPGridNVars(pgrid);
for (var1 = 0; var1 < nvars; var1++)
{
stencils= hypre_SStructGraphStencil(graph, levels[level-1], var1);
HYPRE_SStructGraphSetStencil(graph_level[level], part_crse, var1, stencils );
HYPRE_SStructGraphSetStencil(graph_level[level-1], part_fine, var1, stencils );
if (level == 1)
{
HYPRE_SStructGraphSetStencil(graph_level[level-1], part_crse, var1, stencils );
}
}
}
}
/*-----------------------------------------------------------
* Extract the non-stencil graph structure: assuming only like
* variables connect. Also count the number of unstructured
* connections per part.
*
* THE COARSEST COMPOSITE MATRIX DOES NOT HAVE ANY NON-STENCIL
* CONNECTIONS.
*-----------------------------------------------------------*/
Uventries = hypre_SStructGraphUVEntries(graph);
nUventries= hypre_SStructGraphNUVEntries(graph);
iUventries= hypre_SStructGraphIUVEntries(graph);
nrows = hypre_CTAlloc(HYPRE_Int, max_level+1);
for (i= 0; i< nUventries; i++)
{
Uventry= Uventries[iUventries[i]];
part = hypre_SStructUVEntryPart(Uventry);
hypre_CopyIndex(hypre_SStructUVEntryIndex(Uventry), index);
var = hypre_SStructUVEntryVar(Uventry);
nUentries= hypre_SStructUVEntryNUEntries(Uventry);
for (k= 0; k< nUentries; k++)
{
Uentry = hypre_SStructUVEntryUEntry(Uventry, k);
to_part = hypre_SStructUEntryToPart(Uentry);
hypre_CopyIndex(hypre_SStructUEntryToIndex(Uentry), to_index);
to_var = hypre_SStructUEntryToVar(Uentry);
if ( part_to_level[part] >= part_to_level[to_part] )
{
level = part_to_level[part];
level_part = part_fine;
level_topart = part_crse;
}
else
{
level = part_to_level[to_part];
level_part = part_crse;
level_topart = part_fine;
}
nrows[level]++;
HYPRE_SStructGraphAddEntries(graph_level[level], level_part, index,
var, level_topart, to_index, to_var);
}
}
for (level= 0; level <= max_level; level++)
{
HYPRE_SStructGraphAssemble(graph_level[level]);
}
(fac_data -> graph_level)= graph_level;
/*---------------------------------------------------------------
* Create the level SStruct_Vectors and a temporary global
* sstruct_vector.
*---------------------------------------------------------------*/
b_level= hypre_TAlloc(hypre_SStructVector *, max_level+1);
x_level= hypre_TAlloc(hypre_SStructVector *, max_level+1);
r_level= hypre_TAlloc(hypre_SStructVector *, max_level+1);
e_level= hypre_TAlloc(hypre_SStructVector *, max_level+1);
tx_level= hypre_TAlloc(hypre_SStructPVector *, max_level+1);
for (level= 0; level<= max_level; level++)
{
HYPRE_SStructVectorCreate(comm, grid_level[level], &b_level[level]);
HYPRE_SStructVectorInitialize(b_level[level]);
HYPRE_SStructVectorAssemble(b_level[level]);
HYPRE_SStructVectorCreate(comm, grid_level[level], &x_level[level]);
HYPRE_SStructVectorInitialize(x_level[level]);
HYPRE_SStructVectorAssemble(x_level[level]);
HYPRE_SStructVectorCreate(comm, grid_level[level], &r_level[level]);
HYPRE_SStructVectorInitialize(r_level[level]);
HYPRE_SStructVectorAssemble(r_level[level]);
HYPRE_SStructVectorCreate(comm, grid_level[level], &e_level[level]);
HYPRE_SStructVectorInitialize(e_level[level]);
HYPRE_SStructVectorAssemble(e_level[level]);
/* temporary vector for fine patch relaxation */
hypre_SStructPVectorCreate(comm,
hypre_SStructGridPGrid(grid_level[level], part_fine),
&tx_level[level]);
hypre_SStructPVectorInitialize(tx_level[level]);
hypre_SStructPVectorAssemble(tx_level[level]);
}
/* temp SStructVectors */
HYPRE_SStructVectorCreate(comm, grid, &tx);
HYPRE_SStructVectorInitialize(tx);
HYPRE_SStructVectorAssemble(tx);
(fac_data -> b_level) = b_level;
(fac_data -> x_level) = x_level;
(fac_data -> r_level) = r_level;
(fac_data -> e_level) = e_level;
(fac_data -> tx_level)= tx_level;
(fac_data -> tx) = tx;
/*-----------------------------------------------------------
* Set up the level composite sstruct_matrices.
*-----------------------------------------------------------*/
A_level= hypre_TAlloc(hypre_SStructMatrix *, max_level+1);
hypre_SetIndex3(stride, 1, 1, 1);
for (level= 0; level <= max_level; level++)
{
HYPRE_SStructMatrixCreate(comm, graph_level[level], &A_level[level]);
HYPRE_SStructMatrixInitialize(A_level[level]);
max_box_volume= 0;
pgrid = hypre_SStructGridPGrid(grid, levels[level]);
nvars = hypre_SStructPGridNVars(pgrid);
for (var1 = 0; var1 < nvars; var1++)
{
sgrid= hypre_SStructPGridSGrid(pgrid, var1);
sgrid_boxes= hypre_StructGridBoxes(sgrid);
hypre_ForBoxI(i, sgrid_boxes)
{
sgrid_box = hypre_BoxArrayBox(sgrid_boxes, i);
box_volume= hypre_BoxVolume(sgrid_box);
max_box_volume= hypre_max(max_box_volume, box_volume);
}
}
values = hypre_TAlloc(HYPRE_Real, max_box_volume);
A_pmatrix= hypre_SStructMatrixPMatrix(A_rap, levels[level]);
/*-----------------------------------------------------------
* extract stencil values for all fine levels.
*-----------------------------------------------------------*/
for (var1 = 0; var1 < nvars; var1++)
{
sgrid= hypre_SStructPGridSGrid(pgrid, var1);
sgrid_boxes= hypre_StructGridBoxes(sgrid);
stencils= hypre_SStructGraphStencil(graph, levels[level], var1);
stencil_size= hypre_SStructStencilSize(stencils);
stencil_vars= hypre_SStructStencilVars(stencils);
for (i = 0; i < stencil_size; i++)
{
var2= stencil_vars[i];
A_smatrix= hypre_SStructPMatrixSMatrix(A_pmatrix, var1, var2);
hypre_CopyIndex(hypre_SStructStencilEntry(stencils, i), stencil_shape_i);
hypre_ForBoxI(j, sgrid_boxes)
{
sgrid_box= hypre_BoxArrayBox(sgrid_boxes, j);
box_start= hypre_BoxIMin(sgrid_box);
box_end = hypre_BoxIMax(sgrid_box);
A_smatrix_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A_smatrix), j);
A_smatrix_value=
hypre_StructMatrixExtractPointerByIndex(A_smatrix, j, stencil_shape_i);
hypre_BoxGetSize(sgrid_box, loop_size);
hypre_BoxLoop2Begin(ndim, loop_size,
sgrid_box, box_start, stride, k,
A_smatrix_dbox, box_start, stride, iA);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,k,iA) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(k, iA)
{
values[k]= A_smatrix_value[iA];
}
hypre_BoxLoop2End(k, iA);
HYPRE_SStructMatrixSetBoxValues(A_level[level], part_fine, box_start, box_end,
var1, 1, &i, values);
} /* hypre_ForBoxI */
} /* for i */
} /* for var1 */
hypre_TFree(values);
/*-----------------------------------------------------------
* Extract the coarse part
*-----------------------------------------------------------*/
if (level > 0)
{
max_box_volume= 0;
pgrid = hypre_SStructGridPGrid(grid, levels[level-1]);
nvars = hypre_SStructPGridNVars(pgrid);
for (var1 = 0; var1 < nvars; var1++)
{
sgrid = hypre_SStructPGridSGrid( pgrid, var1 );
sgrid_boxes= hypre_StructGridBoxes(sgrid);
hypre_ForBoxI( i, sgrid_boxes )
{
sgrid_box = hypre_BoxArrayBox(sgrid_boxes, i);
box_volume= hypre_BoxVolume(sgrid_box);
max_box_volume= hypre_max(max_box_volume, box_volume );
}
}
values = hypre_TAlloc(HYPRE_Real, max_box_volume);
A_pmatrix= hypre_SStructMatrixPMatrix(A_rap, levels[level-1]);
/*-----------------------------------------------------------
* extract stencil values
*-----------------------------------------------------------*/
for (var1 = 0; var1 < nvars; var1++)
{
sgrid = hypre_SStructPGridSGrid(pgrid, var1);
sgrid_boxes= hypre_StructGridBoxes(sgrid);
stencils= hypre_SStructGraphStencil(graph, levels[level-1], var1);
stencil_size= hypre_SStructStencilSize(stencils);
stencil_vars= hypre_SStructStencilVars(stencils);
for (i = 0; i < stencil_size; i++)
{
var2= stencil_vars[i];
A_smatrix= hypre_SStructPMatrixSMatrix(A_pmatrix, var1, var2);
hypre_CopyIndex(hypre_SStructStencilEntry(stencils, i), stencil_shape_i);
hypre_ForBoxI( j, sgrid_boxes )
{
sgrid_box= hypre_BoxArrayBox(sgrid_boxes, j);
box_start= hypre_BoxIMin(sgrid_box);
box_end = hypre_BoxIMax(sgrid_box);
A_smatrix_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A_smatrix), j);
A_smatrix_value=
hypre_StructMatrixExtractPointerByIndex(A_smatrix, j, stencil_shape_i);
hypre_BoxGetSize(sgrid_box, loop_size);
hypre_BoxLoop2Begin(ndim, loop_size,
sgrid_box, box_start, stride, k,
A_smatrix_dbox, box_start, stride, iA);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,k,iA) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(k, iA)
{
values[k]= A_smatrix_value[iA];
}
hypre_BoxLoop2End(k, iA);
HYPRE_SStructMatrixSetBoxValues(A_level[level], part_crse, box_start, box_end,
var1, 1, &i, values);
} /* hypre_ForBoxI */
} /* for i */
} /* for var1 */
hypre_TFree(values);
} /* if level > 0 */
} /* for level */
/*-----------------------------------------------------------
* extract the non-stencil values for all but the coarsest
* level sstruct_matrix. Use the HYPRE_IJMatrixGetValues
* for each level of A.
*-----------------------------------------------------------*/
Uventries = hypre_SStructGraphUVEntries(graph);
nUventries= hypre_SStructGraphNUVEntries(graph);
iUventries= hypre_SStructGraphIUVEntries(graph);
/*-----------------------------------------------------------
* Allocate memory for arguments of HYPRE_IJMatrixGetValues.
*-----------------------------------------------------------*/
ncols = hypre_TAlloc(HYPRE_Int *, max_level+1);
rows = hypre_TAlloc(HYPRE_Int *, max_level+1);
cols = hypre_TAlloc(HYPRE_Int *, max_level+1);
cnt = hypre_CTAlloc(HYPRE_Int, max_level+1);
ncols[0]= NULL;
rows[0] = NULL;
cols[0] = NULL;
for (level= 1; level<= max_level; level++)
{
ncols[level]= hypre_TAlloc(HYPRE_Int, nrows[level]);
for (i=0; i< nrows[level]; i++)
{
ncols[level][i]= 1;
}
rows[level] = hypre_TAlloc(HYPRE_Int, nrows[level]);
cols[level] = hypre_TAlloc(HYPRE_Int, nrows[level]);
}
for (i= 0; i< nUventries; i++)
{
Uventry = Uventries[iUventries[i]];
part = hypre_SStructUVEntryPart(Uventry);
hypre_CopyIndex(hypre_SStructUVEntryIndex(Uventry), index);
var = hypre_SStructUVEntryVar(Uventry);
hypre_SStructGridFindBoxManEntry(grid, part, index, var, &boxman_entry);
hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index, &row_coord,
matrix_type);
nUentries= hypre_SStructUVEntryNUEntries(Uventry);
for (k= 0; k< nUentries; k++)
{
to_part = hypre_SStructUVEntryToPart(Uventry, k);
to_rank = hypre_SStructUVEntryToRank(Uventry, k);
/*-----------------------------------------------------------
* store the row & col indices in the correct level.
*-----------------------------------------------------------*/
level = hypre_max( part_to_level[part], part_to_level[to_part] );
rows[level][ cnt[level] ]= row_coord;
cols[level][ cnt[level]++ ]= to_rank;
}
}
hypre_TFree(cnt);
for (level= 1; level<= max_level; level++)
{
vals = hypre_CTAlloc(HYPRE_Real, nrows[level]);
level_rows= hypre_TAlloc(HYPRE_Int, nrows[level]);
level_cols= hypre_TAlloc(HYPRE_Int, nrows[level]);
HYPRE_IJMatrixGetValues(ij_A, nrows[level], ncols[level], rows[level],
cols[level], vals);
Uventries = hypre_SStructGraphUVEntries(graph_level[level]);
/*-----------------------------------------------------------
* Find the rows & cols of the level ij_matrices where the
* extracted data must be placed. Note that because the
* HYPRE_SStructGraphAddEntries calls on the graph_level's
* were made in the same order in which rows[level] &
* cols[level] were formed, the coefficients in vals are
* in the correct order.
*-----------------------------------------------------------*/
level_cnt= 0;
for (i= 0; i< hypre_SStructGraphNUVEntries(graph_level[level]); i++)
{
j = hypre_SStructGraphIUVEntry(graph_level[level], i);
Uventry= Uventries[j];
part = hypre_SStructUVEntryPart(Uventry);
hypre_CopyIndex(hypre_SStructUVEntryIndex(Uventry), index);
var = hypre_SStructUVEntryVar(Uventry);
hypre_SStructGridFindBoxManEntry(grid_level[level], part, index, var, &boxman_entry);
hypre_SStructBoxManEntryGetGlobalRank(boxman_entry, index, &row_coord, matrix_type);
nUentries= hypre_SStructUVEntryNUEntries(Uventry);
for (k= 0; k< nUentries; k++)
{
to_rank = hypre_SStructUVEntryToRank(Uventry, k);
level_rows[level_cnt] = row_coord;
level_cols[level_cnt++]= to_rank;
}
}
/*-----------------------------------------------------------
* Place the extracted ij coefficients into the level ij
* matrices.
*-----------------------------------------------------------*/
HYPRE_IJMatrixSetValues( hypre_SStructMatrixIJMatrix(A_level[level]),
nrows[level], ncols[level], (const HYPRE_Int *) level_rows,
(const HYPRE_Int *) level_cols, (const HYPRE_Real *) vals );
hypre_TFree(ncols[level]);
hypre_TFree(rows[level]);
hypre_TFree(cols[level]);
hypre_TFree(vals);
hypre_TFree(level_rows);
hypre_TFree(level_cols);
}
hypre_TFree(ncols);
hypre_TFree(rows);
hypre_TFree(cols);
hypre_TFree(nrows);
/*---------------------------------------------------------------
* Construct the fine grid (part 1) SStruct_PMatrix for all
* levels except for max_level. This involves coarsening the
* finer level SStruct_Matrix. Coarsening involves interpolation,
* matvec, and restriction (to obtain the "row-sum").
*---------------------------------------------------------------*/
matvec_data_level = hypre_TAlloc(void *, max_level+1);
pmatvec_data_level = hypre_TAlloc(void *, max_level+1);
interp_data_level = hypre_TAlloc(void *, max_level+1);
restrict_data_level= hypre_TAlloc(void *, max_level+1);
for (level= 0; level<= max_level; level++)
{
if (level < max_level)
{
hypre_FacSemiInterpCreate2(&interp_data_level[level]);
hypre_FacSemiInterpSetup2(interp_data_level[level],
x_level[level+1],
hypre_SStructVectorPVector(x_level[level], part_fine),
refine_factors[level+1]);
}
else
{
interp_data_level[level]= NULL;
}
if (level > 0)
{
hypre_FacSemiRestrictCreate2(&restrict_data_level[level]);
hypre_FacSemiRestrictSetup2(restrict_data_level[level],
x_level[level], part_crse, part_fine,
hypre_SStructVectorPVector(x_level[level-1], part_fine),
refine_factors[level]);
}
else
{
restrict_data_level[level]= NULL;
}
}
for (level= max_level; level> 0; level--)
{
/* hypre_FacZeroCFSten(hypre_SStructMatrixPMatrix(A_level[level], part_fine),
hypre_SStructMatrixPMatrix(A_level[level], part_crse),
grid_level[level],
part_fine,
refine_factors[level]);
hypre_FacZeroFCSten(hypre_SStructMatrixPMatrix(A_level[level], part_fine),
grid_level[level],
part_fine);
*/
hypre_ZeroAMRMatrixData(A_level[level], part_crse, refine_factors[level]);
HYPRE_SStructMatrixAssemble(A_level[level]);
/*------------------------------------------------------------
* create data structures that are needed for coarsening
-------------------------------------------------------------*/
hypre_SStructMatvecCreate(&matvec_data_level[level]);
hypre_SStructMatvecSetup(matvec_data_level[level],
A_level[level],
x_level[level]);
hypre_SStructPMatvecCreate(&pmatvec_data_level[level]);
hypre_SStructPMatvecSetup(pmatvec_data_level[level],
hypre_SStructMatrixPMatrix(A_level[level],part_fine),
hypre_SStructVectorPVector(x_level[level],part_fine));
}
/*---------------------------------------------------------------
* To avoid memory leaks, we cannot reference the coarsest level
* SStructPMatrix. We need only copy the structured coefs.
*---------------------------------------------------------------*/
pgrid= hypre_SStructGridPGrid(grid_level[0], part_fine);
nvars= hypre_SStructPGridNVars(pgrid);
A_pmatrix= hypre_SStructMatrixPMatrix(A_level[0], part_fine);
for (var1 = 0; var1 < nvars; var1++)
{
sgrid= hypre_SStructPGridSGrid(pgrid, var1);
sgrid_boxes= hypre_StructGridBoxes(sgrid);
max_box_volume= 0;
hypre_ForBoxI(i, sgrid_boxes)
{
sgrid_box = hypre_BoxArrayBox(sgrid_boxes, i);
box_volume= hypre_BoxVolume(sgrid_box);
max_box_volume= hypre_max(max_box_volume, box_volume);
}
values = hypre_TAlloc(HYPRE_Real, max_box_volume);
stencils= hypre_SStructGraphStencil(graph_level[0], part_fine, var1);
stencil_size= hypre_SStructStencilSize(stencils);
stencil_vars= hypre_SStructStencilVars(stencils);
for (i = 0; i < stencil_size; i++)
{
var2= stencil_vars[i];
A_smatrix= hypre_SStructPMatrixSMatrix(A_pmatrix, var1, var2);
hypre_CopyIndex(hypre_SStructStencilEntry(stencils, i), stencil_shape_i);
hypre_ForBoxI(j, sgrid_boxes)
{
sgrid_box= hypre_BoxArrayBox(sgrid_boxes, j);
box_start= hypre_BoxIMin(sgrid_box);
box_end = hypre_BoxIMax(sgrid_box);
A_smatrix_dbox= hypre_BoxArrayBox(hypre_StructMatrixDataSpace(A_smatrix), j);
A_smatrix_value=
hypre_StructMatrixExtractPointerByIndex(A_smatrix, j, stencil_shape_i);
hypre_BoxGetSize(sgrid_box, loop_size);
hypre_BoxLoop2Begin(ndim, loop_size,
sgrid_box, box_start, stride, k,
A_smatrix_dbox, box_start, stride, iA);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(HYPRE_BOX_PRIVATE,k,iA) HYPRE_SMP_SCHEDULE
#endif
hypre_BoxLoop2For(k, iA)
{
values[k]= A_smatrix_value[iA];
}
hypre_BoxLoop2End(k, iA);
HYPRE_SStructMatrixSetBoxValues(A_level[0], part_crse, box_start, box_end,
var1, 1, &i, values);
} /* hypre_ForBoxI */
} /* for i */
hypre_TFree(values);
} /* for var1 */
HYPRE_SStructMatrixAssemble(A_level[0]);
hypre_SStructMatvecCreate(&matvec_data_level[0]);
hypre_SStructMatvecSetup(matvec_data_level[0],
A_level[0],
x_level[0]);
hypre_SStructPMatvecCreate(&pmatvec_data_level[0]);
hypre_SStructPMatvecSetup(pmatvec_data_level[0],
hypre_SStructMatrixPMatrix(A_level[0],part_fine),
hypre_SStructVectorPVector(x_level[0],part_fine));
hypre_SStructMatvecCreate(&matvec_data);
hypre_SStructMatvecSetup(matvec_data, A_rap, x);
/*HYPRE_SStructVectorPrint("sstruct.out.b_l", b_level[max_level], 0);*/
/*HYPRE_SStructMatrixPrint("sstruct.out.A_l", A_level[max_level-2], 0);*/
(fac_data -> A_level) = A_level;
(fac_data -> matvec_data_level) = matvec_data_level;
(fac_data -> pmatvec_data_level) = pmatvec_data_level;
(fac_data -> matvec_data) = matvec_data;
(fac_data -> interp_data_level) = interp_data_level;
(fac_data -> restrict_data_level) = restrict_data_level;
/*---------------------------------------------------------------
* Create the fine patch relax_data structure.
*---------------------------------------------------------------*/
relax_data_level = hypre_TAlloc(void *, max_level+1);
for (level= 0; level<= max_level; level++)
{
relax_data_level[level]= hypre_SysPFMGRelaxCreate(comm);
hypre_SysPFMGRelaxSetTol(relax_data_level[level], 0.0);
hypre_SysPFMGRelaxSetType(relax_data_level[level], relax_type);
if (usr_jacobi_weight)
{
hypre_SysPFMGRelaxSetJacobiWeight(relax_data_level[level], jacobi_weight);
}
hypre_SysPFMGRelaxSetTempVec(relax_data_level[level], tx_level[level]);
hypre_SysPFMGRelaxSetup(relax_data_level[level],
hypre_SStructMatrixPMatrix(A_level[level], part_fine),
hypre_SStructVectorPVector(b_level[level], part_fine),
hypre_SStructVectorPVector(x_level[level], part_fine));
}
(fac_data -> relax_data_level) = relax_data_level;
/*---------------------------------------------------------------
* Create the coarsest composite level preconditioned solver.
* csolver_type= 1 multigrid-pcg
* csolver_type= 2 multigrid
*---------------------------------------------------------------*/
if (csolver_type == 1)
{
HYPRE_SStructPCGCreate(comm, &crse_solver);
HYPRE_PCGSetMaxIter((HYPRE_Solver) crse_solver, 1);
HYPRE_PCGSetTol((HYPRE_Solver) crse_solver, 1.0e-6);
HYPRE_PCGSetTwoNorm((HYPRE_Solver) crse_solver, 1);
/* use SysPFMG solver as preconditioner */
HYPRE_SStructSysPFMGCreate(comm, &crse_precond);
HYPRE_SStructSysPFMGSetMaxIter(crse_precond, 1);
HYPRE_SStructSysPFMGSetTol(crse_precond, 0.0);
HYPRE_SStructSysPFMGSetZeroGuess(crse_precond);
/* weighted Jacobi = 1; red-black GS = 2 */
HYPRE_SStructSysPFMGSetRelaxType(crse_precond, 3);
if (usr_jacobi_weight)
{
HYPRE_SStructFACSetJacobiWeight(crse_precond, jacobi_weight);
}
HYPRE_SStructSysPFMGSetNumPreRelax(crse_precond, 1);
HYPRE_SStructSysPFMGSetNumPostRelax(crse_precond, 1);
HYPRE_PCGSetPrecond((HYPRE_Solver) crse_solver,
(HYPRE_PtrToSolverFcn) HYPRE_SStructSysPFMGSolve,
(HYPRE_PtrToSolverFcn) HYPRE_SStructSysPFMGSetup,
(HYPRE_Solver) crse_precond);
HYPRE_PCGSetup((HYPRE_Solver) crse_solver,
(HYPRE_Matrix) A_level[0],
(HYPRE_Vector) b_level[0],
(HYPRE_Vector) x_level[0]);
}
else if (csolver_type == 2)
{
crse_precond= NULL;
HYPRE_SStructSysPFMGCreate(comm, &crse_solver);
HYPRE_SStructSysPFMGSetMaxIter(crse_solver, 1);
HYPRE_SStructSysPFMGSetTol(crse_solver, 1.0e-6);
HYPRE_SStructSysPFMGSetZeroGuess(crse_solver);
/* weighted Jacobi = 1; red-black GS = 2 */
HYPRE_SStructSysPFMGSetRelaxType(crse_solver, relax_type);
if (usr_jacobi_weight)
{
/* crse_precond is NULL in this branch; the weight belongs on crse_solver */
HYPRE_SStructFACSetJacobiWeight(crse_solver, jacobi_weight);
}
HYPRE_SStructSysPFMGSetNumPreRelax(crse_solver, 1);
HYPRE_SStructSysPFMGSetNumPostRelax(crse_solver, 1);
HYPRE_SStructSysPFMGSetup(crse_solver, A_level[0], b_level[0], x_level[0]);
}
(fac_data -> csolver) = crse_solver;
(fac_data -> cprecond) = crse_precond;
hypre_FacZeroCData(fac_vdata, A_rap);
return ierr;
}
|
keychain_fmt_plug.c | /* Mac OS X Keychain cracker patch for JtR. Hacked together during Summer of
* 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* * (c) 2004 Matt Johnston <matt @ ucc asn au>
* This code may be freely used and modified for any purpose. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_keychain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_keychain);
#else
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <openssl/des.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "pbkdf2_hmac_sha1.h"
#include "jumbo.h"
#include "memdbg.h"
#define FORMAT_LABEL "keychain"
#define FORMAT_NAME "Mac OS X Keychain"
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA1 3DES " SHA1_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA1 3DES 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 125
#define SALT_SIZE sizeof(*salt_struct)
#define BINARY_ALIGN 1
#define SALT_ALIGN 1
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA1
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
#define SALTLEN 20
#define IVLEN 8
#define CTLEN 48
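/* hash layout checked by valid() below:
 * $keychain$*<2*SALTLEN hex salt>*<2*IVLEN hex iv>*<2*CTLEN hex ciphertext> */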
static struct fmt_tests keychain_tests[] = {
{"$keychain$*10f7445c8510fa40d9ef6b4e0f8c772a9d37e449*f3d19b2a45cdcccb*8c3c3b1c7d48a24dad4ccbd4fd794ca9b0b3f1386a0a4527f3548bfe6e2f1001804b082076641bbedbc9f3a7c33c084b", "password"},
// these were generated with pass_gen.pl. NOTE: they ALL embed the same plaintext data (which gets encrypted) that was decrypted from the hash above.
{"$keychain$*a88cd6fbaaf40bc5437eee015a0f95ab8ab70545*b12372b1b7cb5c1f*1f5c596bcdd015afc126bc86f42dd092cb9d531d14a0aafaa89283f1bebace60562d497332afbd952fd329cc864144ec", "password"},
{"$keychain$*23328e264557b93204dc825c46a25f7fb1e17d4a*19a9efde2ca98d30*6ac89184134758a95c61bd274087ae0cffcf49f433c7f91edea98bd4fd60094e2936d99e4d985dec98284379f23259c0", "hhh"},
{"$keychain$*927717d8509db73aa47c5e820e3a381928b5e048*eef33a4a1483ae45*a52691580f17e295b8c2320947968503c605b2784bfe4851077782139f0de46f71889835190c361870baa56e2f4e9e43", "JtR-Jumbo"},
{"$keychain$*1fab88d0b8ea1a3d303e0aef519796eb29e46299*3358b0e77d60892f*286f975dcd191024227514ed9939d0fa94034294ba1eca6d5c767559e75e944b5a2fcb54fd696be64c64f9d069ce628a", "really long password -----------------------------"},
/* Sample keychain from OS X El Capitan, November of 2015 */
{"$keychain$*3a473dd308b1713ddc76fc976758eb543779a228*570b762ec2b177d0*a1f491231412ff74344244db4d98b1dab6e40a8fc63a11f0d5cdabf97fce5c4fa8ae0a1f95d0398d37e3d45e9fa07aa7", "El Capitan"},
{NULL}
};
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static struct custom_salt {
unsigned char salt[SALTLEN];
unsigned char iv[IVLEN];
unsigned char ct[CTLEN];
} *salt_struct;
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
}
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
if (strncmp(ciphertext, "$keychain$*", 11) != 0)
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += 11;
if ((p = strtokm(ctcopy, "*")) == NULL) /* salt */
goto err;
if(hexlenl(p) != SALTLEN * 2)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv */
goto err;
if(hexlenl(p) != IVLEN * 2)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* ciphertext */
goto err;
if(hexlenl(p) != CTLEN * 2)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
ctcopy += 11; /* skip over "$keychain$*" */
salt_struct = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
p = strtokm(ctcopy, "*");
for (i = 0; i < SALTLEN; i++)
salt_struct->salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
for (i = 0; i < IVLEN; i++)
salt_struct->iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
for (i = 0; i < CTLEN; i++)
salt_struct->ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)salt_struct;
}
static void set_salt(void *salt)
{
salt_struct = (struct custom_salt *)salt;
}
static int kcdecrypt(unsigned char *key, unsigned char *iv, unsigned char *data)
{
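/* the 24-byte PBKDF2 output is split into three 8-byte DES keys and the
 * ciphertext is decrypted with 3DES-EDE in CBC mode; a correct password
 * is detected by the PKCS padding check below */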
unsigned char out[CTLEN];
DES_cblock key1, key2, key3;
DES_cblock ivec;
DES_key_schedule ks1, ks2, ks3;
memset(out, 0, sizeof(out));
memcpy(key1, key, 8);
memcpy(key2, key + 8, 8);
memcpy(key3, key + 16, 8);
DES_set_key((DES_cblock *) key1, &ks1);
DES_set_key((DES_cblock *) key2, &ks2);
DES_set_key((DES_cblock *) key3, &ks3);
memcpy(ivec, iv, 8);
DES_ede3_cbc_encrypt(data, out, CTLEN, &ks1, &ks2, &ks3, &ivec, DES_DECRYPT);
/* possible bug here, is this assumption (pad of 4) always valid? */
if (out[47] != 4 || check_pkcs_pad(out, CTLEN, 8) < 0)
return -1;
return 0;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
unsigned char master[MAX_KEYS_PER_CRYPT][32];
int i;
#ifdef SIMD_COEF_32
int lens[MAX_KEYS_PER_CRYPT];
unsigned char *pin[MAX_KEYS_PER_CRYPT], *pout[MAX_KEYS_PER_CRYPT];
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = strlen(saved_key[index+i]);
pin[i] = (unsigned char*)saved_key[index+i];
pout[i] = master[i];
}
pbkdf2_sha1_sse((const unsigned char**)pin, lens, salt_struct->salt, SALTLEN, 1000, pout, 24, 0);
#else
pbkdf2_sha1((unsigned char *)saved_key[index], strlen(saved_key[index]), salt_struct->salt, SALTLEN, 1000, master[0], 24, 0);
#endif
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
if(kcdecrypt(master[i], salt_struct->iv, salt_struct->ct) == 0)
cracked[index+i] = 1;
else
cracked[index+i] = 0;
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
if (cracked[index])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void keychain_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_keychain = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
{ NULL },
keychain_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
fmt_default_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_salt_hash,
NULL,
set_salt,
keychain_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
omp_hello.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
void usage(void) {
printf("usage: ./omp_hello [threads num]\n");
}
void Hello(void) {
int my_rank = omp_get_thread_num();
int thread_count = omp_get_num_threads();
printf("Hello from thread %d of %d\n", my_rank, thread_count);
}
int main(int argc, char** argv) {
if (argc < 2) {
fprintf(stderr, "arg error\n");
usage();
return 1;
}
int thread_count = strtol(argv[1], NULL, 10);
#pragma omp parallel num_threads(thread_count)
Hello();
return 0;
}
|
integrator.h | #ifndef INTEGRATOR_H
#define INTEGRATOR_H
#include <memory>
#include <chrono>
#include <omp.h>
#include "image.h"
#include "camera.h"
#include "scene.h"
#include "util.h"
class Integrator {
public:
std::shared_ptr<Image> image;
std::shared_ptr<Camera> camera;
std::shared_ptr<Sampler> sampler;
Integrator(const std::shared_ptr<Image>& _image, const std::shared_ptr<Camera>& _camera, const std::shared_ptr<Sampler>& _sampler) : image(_image), camera(_camera), sampler(_sampler) {};
virtual Vec3 integrate(const Ray& ray, const Scene& scene) = 0;
};
class PurePathTracing : public Integrator {
public:
int samples;
int maxDepth;
PurePathTracing(const std::shared_ptr<Image>& _image, const std::shared_ptr<Camera>& _camera, const std::shared_ptr<Sampler>& _sampler, int _samples, int _maxDepth=100) : Integrator(_image, _camera, _sampler), samples(_samples), maxDepth(_maxDepth) {};
Vec3 integrate(const Ray& initRay, const Scene& scene) {
Ray ray = initRay;
Vec3 throughput(1, 1, 1);
float roulette = 1;
Vec3 accumulated_color(0, 0, 0);
for(int i = 0; i < maxDepth; i++) {
//Russian Roulette
if(sampler->getNext() > roulette) {
break;
}
else {
throughput /= roulette;
roulette *= 0.99;
}
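//Surviving paths are reweighted by 1/roulette so the estimator stays
//unbiased; the survival probability decays by a factor of 0.99 per bounce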
Hit res;
if(scene.intersect(ray, res)) {
//When ray hits arealight
if(res.hitPrimitive->hasLight()) {
accumulated_color += throughput * res.hitPrimitive->light->Le();
}
//Generate Local Coordinate Vectors
Vec3 n = res.hitNormal;
Vec3 s, t;
orthonormalBasis(n, s, t);
Vec3 wo = -ray.direction;
Vec3 wo_local = world2local(wo, s, n, t);
//BRDF Sampling
auto mat = res.hitPrimitive->material;
Vec3 wi_local;
float pdf_w;
Vec3 brdf_value = mat->sample(wo_local, res, *sampler, wi_local, pdf_w);
//Update throughput
throughput *= brdf_value * absCosTheta(wi_local) / pdf_w;
//Generate Next Ray
Vec3 wi = local2world(wi_local, s, n, t);
ray = Ray(res.hitPos, wi);
}
//When ray hits sky
else {
accumulated_color += throughput * Vec3(1, 1, 1);
break;
}
}
return accumulated_color;
};
void render(const Scene& scene) {
auto start_time = std::chrono::system_clock::now();
for(int k = 0; k < samples; k++) {
#pragma omp parallel for schedule(dynamic, 1)
for(int i = 0; i < image->height; i++) {
for(int j = 0; j < image->width; j++) {
float u = (2*(j + sampler->getNext()) - image->width)/image->height;
float v = (2*(i + sampler->getNext()) - image->height)/image->height;
Ray ray = camera->getRay(u, v);
image->addPixel(i, j, this->integrate(ray, scene));
if(omp_get_thread_num() == 0) {
int index = j + image->width*i + image->width*image->height*k;
std::cout << progressbar(index, image->width*image->height*samples) << " " << percentage(index, image->width*image->height*samples) << "\r" << std::flush;
}
}
}
}
auto end_time = std::chrono::system_clock::now();
auto msec = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
std::cout << "Rendering finished in " << msec << "ms" << std::endl;
image->divide(samples);
image->ppm_output("output.ppm");
};
};
class NEEPathTracing : public Integrator {
public:
int samples;
int maxDepth;
NEEPathTracing(const std::shared_ptr<Image>& _image, const std::shared_ptr<Camera>& _camera, const std::shared_ptr<Sampler>& _sampler, int _samples, int _maxDepth=100) : Integrator(_image, _camera, _sampler), samples(_samples), maxDepth(_maxDepth) {};
Vec3 integrate(const Ray& initRay, const Scene& scene) {
Ray ray = initRay;
Vec3 throughput(1, 1, 1);
float roulette = 1;
Vec3 accumulated_color(0, 0, 0);
for(int i = 0; i < maxDepth; i++) {
//Russian Roulette
if(sampler->getNext() > roulette) {
break;
}
else {
throughput /= roulette;
roulette *= 0.99;
}
Hit res;
if(scene.intersect(ray, res)) {
//Corner Case
if(i == 0 && res.hitPrimitive->hasLight()) {
accumulated_color += res.hitPrimitive->light->Le();
}
//Generate Local Coordinate Vectors
Vec3 n = res.hitNormal;
Vec3 s, t;
orthonormalBasis(n, s, t);
Vec3 wo = -ray.direction;
Vec3 wo_local = world2local(wo, s, n, t);
//light sampling
for(auto l : scene.lights) {
Vec3 sampledPos;
float pdf_A;
Vec3 le = l->sample(res, *sampler, sampledPos, pdf_A);
Vec3 wi = normalize(sampledPos - res.hitPos);
Hit shadow_res;
Ray shadowRay(res.hitPos, wi);
if(scene.intersect(shadowRay, shadow_res) && shadow_res.hitPrimitive->light == l) {
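//Convert the light's area pdf to a solid-angle pdf at the shading point:
//pdf_w = r^2 / |cos(theta_light)| * pdf_A, with r the distance to the sample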
float pdf_w = (sampledPos - res.hitPos).length2() / std::abs(dot(-shadowRay.direction, shadow_res.hitNormal)) * pdf_A;
Vec3 wi_local = world2local(wi, s, n, t);
Vec3 brdf = res.hitPrimitive->material->BRDF(res, wo_local, wi_local);
accumulated_color += throughput * brdf * absCosTheta(wi_local) * le / pdf_w;
}
}
//BRDF sampling for next ray
auto mat = res.hitPrimitive->material;
Vec3 wi_local;
float pdf_w;
Vec3 brdf_value = mat->sample(wo_local, res, *sampler, wi_local, pdf_w);
//Update throughput
throughput *= brdf_value * absCosTheta(wi_local) / pdf_w;
//Generate Next Ray
Vec3 wi = local2world(wi_local, s, n, t);
ray = Ray(res.hitPos, wi);
}
else {
accumulated_color += throughput * Vec3(0, 0, 0);
break;
}
}
return accumulated_color;
};
void render(const Scene& scene) {
auto start_time = std::chrono::system_clock::now();
for(int k = 0; k < samples; k++) {
#pragma omp parallel for schedule(dynamic, 1)
for(int i = 0; i < image->height; i++) {
for(int j = 0; j < image->width; j++) {
float u = (2*(j + sampler->getNext()) - image->width)/image->height;
float v = (2*(i + sampler->getNext()) - image->height)/image->height;
Ray ray = camera->getRay(u, v);
image->addPixel(i, j, this->integrate(ray, scene));
if(omp_get_thread_num() == 0) {
int index = j + image->width*i + image->width*image->height*k;
std::cout << progressbar(index, image->width*image->height*samples) << " " << percentage(index, image->width*image->height*samples) << "\r" << std::flush;
}
}
}
}
auto end_time = std::chrono::system_clock::now();
auto msec = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time).count();
std::cout << "Rendering finished in " << msec << "ms" << std::endl;
image->divide(samples);
image->ppm_output("output.ppm");
};
};
#endif
|
gemver.orio.par.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#include "decl.h"
double rtclock()
{
struct timezone tzp;
struct timeval tp;
int stat;
gettimeofday (&tp, &tzp);
return (tp.tv_sec + tp.tv_usec*1.0e-6);
}
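/* GEMVER kernel (cf. the reference version in the #else branch below):
 *   B = A + u1*v1^T + u2*v2^T
 *   x = b*(B^T * y) + z
 *   w = a*(B * x)
 */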
int main()
{
init_input_vars();
double annot_t_start=0, annot_t_end=0, annot_t_total=0;
int annot_i;
for (annot_i=0; annot_i<REPS; annot_i++)
{
annot_t_start = rtclock();
#ifdef DYNAMIC
int i,j;
for (i=0; i<=n-1; i=i+1) {
x[i]=0;
w[i]=0;
}
{
for (j=0; j<=n-1; j=j+1) {
double scv_9, scv_10, scv_11;
scv_9=u2[j];
scv_10=u1[j];
scv_11=y[j];
{
for (i=0; i<=n-3; i=i+3) {
double scv_1, scv_2, scv_3, scv_4, scv_5, scv_6;
scv_1=x[(i+1)];
scv_2=B[j*n+i];
scv_3=x[(i+2)];
scv_4=B[j*n+i+2];
scv_5=B[j*n+i+1];
scv_6=x[i];
scv_2=scv_9*v2[i]+scv_10*v1[i]+A[j*n+i];
scv_5=scv_9*v2[(i+1)]+scv_10*v1[(i+1)]+A[j*n+i+1];
scv_4=scv_9*v2[(i+2)]+scv_10*v1[(i+2)]+A[j*n+i+2];
scv_6=scv_11*scv_2+scv_6;
scv_1=scv_11*scv_5+scv_1;
scv_3=scv_11*scv_4+scv_3;
x[(i+1)]=scv_1;
B[j*n+i]=scv_2;
x[(i+2)]=scv_3;
B[j*n+i+2]=scv_4;
B[j*n+i+1]=scv_5;
x[i]=scv_6;
}
for (; i<=n-1; i=i+1) {
double scv_7, scv_8;
scv_7=x[i];
scv_8=B[j*n+i];
scv_8=scv_9*v2[i]+scv_10*v1[i]+A[j*n+i];
scv_7=scv_11*scv_8+scv_7;
x[i]=scv_7;
B[j*n+i]=scv_8;
}
}
}
}
{
#pragma omp parallel for private(i)
for (i=0; i<=n-1; i=i+1) {
x[i]=b*x[i]+z[i];
}
}
{
#pragma omp parallel for private(j,i)
for (i=0; i<=n-6; i=i+6) {
double scv_6, scv_7, scv_8, scv_9, scv_10, scv_11;
scv_6=w[i];
scv_7=w[(i+1)];
scv_8=w[(i+5)];
scv_9=w[(i+4)];
scv_10=w[(i+3)];
scv_11=w[(i+2)];
register int cbv_1;
cbv_1=n-4;
#pragma ivdep
#pragma vector always
for (j=0; j<=cbv_1; j=j+4) {
double scv_1, scv_2, scv_3, scv_4;
scv_1=x[j];
scv_2=x[(j+3)];
scv_3=x[(j+2)];
scv_4=x[(j+1)];
scv_6=scv_6+B[i*n+j]*scv_1;
scv_7=scv_7+B[(i+1)*n+j]*scv_1;
scv_11=scv_11+B[(i+2)*n+j]*scv_1;
scv_10=scv_10+B[(i+3)*n+j]*scv_1;
scv_9=scv_9+B[(i+4)*n+j]*scv_1;
scv_8=scv_8+B[(i+5)*n+j]*scv_1;
scv_6=scv_6+B[i*n+j+1]*scv_4;
scv_7=scv_7+B[(i+1)*n+j+1]*scv_4;
scv_11=scv_11+B[(i+2)*n+j+1]*scv_4;
scv_10=scv_10+B[(i+3)*n+j+1]*scv_4;
scv_9=scv_9+B[(i+4)*n+j+1]*scv_4;
scv_8=scv_8+B[(i+5)*n+j+1]*scv_4;
scv_6=scv_6+B[i*n+j+2]*scv_3;
scv_7=scv_7+B[(i+1)*n+j+2]*scv_3;
scv_11=scv_11+B[(i+2)*n+j+2]*scv_3;
scv_10=scv_10+B[(i+3)*n+j+2]*scv_3;
scv_9=scv_9+B[(i+4)*n+j+2]*scv_3;
scv_8=scv_8+B[(i+5)*n+j+2]*scv_3;
scv_6=scv_6+B[i*n+j+3]*scv_2;
scv_7=scv_7+B[(i+1)*n+j+3]*scv_2;
scv_11=scv_11+B[(i+2)*n+j+3]*scv_2;
scv_10=scv_10+B[(i+3)*n+j+3]*scv_2;
scv_9=scv_9+B[(i+4)*n+j+3]*scv_2;
scv_8=scv_8+B[(i+5)*n+j+3]*scv_2;
}
register int cbv_2;
cbv_2=n-1;
#pragma ivdep
#pragma vector always
for (; j<=cbv_2; j=j+1) {
double scv_5;
scv_5=x[j];
scv_6=scv_6+B[i*n+j]*scv_5;
scv_7=scv_7+B[(i+1)*n+j]*scv_5;
scv_11=scv_11+B[(i+2)*n+j]*scv_5;
scv_10=scv_10+B[(i+3)*n+j]*scv_5;
scv_9=scv_9+B[(i+4)*n+j]*scv_5;
scv_8=scv_8+B[(i+5)*n+j]*scv_5;
}
scv_6=a*scv_6;
scv_7=a*scv_7;
scv_11=a*scv_11;
scv_10=a*scv_10;
scv_9=a*scv_9;
scv_8=a*scv_8;
w[i]=scv_6;
w[(i+1)]=scv_7;
w[(i+5)]=scv_8;
w[(i+4)]=scv_9;
w[(i+3)]=scv_10;
w[(i+2)]=scv_11;
}
for (i=n-((n-1)%6)-1; i<=n-1; i=i+1) {
double scv_12;
scv_12=w[i];
{
register int cbv_3;
cbv_3=n-4;
#pragma ivdep
#pragma vector always
for (j=0; j<=cbv_3; j=j+4) {
scv_12=scv_12+B[i*n+j]*x[j];
scv_12=scv_12+B[i*n+j+1]*x[(j+1)];
scv_12=scv_12+B[i*n+j+2]*x[(j+2)];
scv_12=scv_12+B[i*n+j+3]*x[(j+3)];
}
register int cbv_4;
cbv_4=n-1;
#pragma ivdep
#pragma vector always
for (; j<=cbv_4; j=j+1) {
scv_12=scv_12+B[i*n+j]*x[j];
}
}
scv_12=a*scv_12;
w[i]=scv_12;
}
}
#else
{
int i,j,it,jt;
for (i=0; i<=n-1; i=i+1) {
x[i]=0;
w[i]=0;
}
for (j=0; j<=n-1; j=j+1) {
for (i=0; i<=n-1; i=i+1) {
B[j][i]=u2[j]*v2[i]+u1[j]*v1[i]+A[j][i];
x[i]=y[j]*B[j][i]+x[i];
}
}
for (i=0; i<=n-1; i=i+1) {
x[i]=b*x[i]+z[i];
}
{
#pragma omp parallel for private(i,j)
for (i=0; i<=n-1; i=i+1) {
double scv_1;
scv_1=w[i];
for (j=0; j<=n-1; j=j+1) {
scv_1=scv_1+B[i][j]*x[j];
}
scv_1=a*scv_1;
w[i]=scv_1;
}
}
}
#endif
annot_t_end = rtclock();
annot_t_total += annot_t_end - annot_t_start;
}
annot_t_total = annot_t_total / REPS;
#ifndef TEST
printf("%f\n", annot_t_total);
#else
{
int i, j;
for (i=0; i<n; i++) {
if (i%100==0)
printf("\n");
printf("%f ",w[i]);
}
}
#endif
return ((int) w[0]);
}
|
boxloop_cuda.h | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* Header info for the BoxLoop
*
*****************************************************************************/
/*--------------------------------------------------------------------------
* BoxLoop macros:
*--------------------------------------------------------------------------*/
#ifndef HYPRE_NEWBOXLOOP_HEADER
#define HYPRE_NEWBOXLOOP_HEADER
#include <cuda.h>
#include <cuda_runtime.h>
#ifdef HYPRE_USING_OPENMP
#include <omp.h>
#endif
#define HYPRE_LAMBDA [=] __host__ __device__
#define BLOCKSIZE 512
typedef struct hypre_Boxloop_struct
{
HYPRE_Int lsize0,lsize1,lsize2;
HYPRE_Int strides0,strides1,strides2;
HYPRE_Int bstart0,bstart1,bstart2;
HYPRE_Int bsize0,bsize1,bsize2;
} hypre_Boxloop;
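/* loop metadata captured by value into the device lambda, one per box:
 * lsizeN = loop extent, stridesN = index stride, bstartN = offset of the
 * loop start inside the data box, bsizeN = data-box extent minus one
 * (bsizeN+1 rebuilds the row pitch in hypre_BoxLoopIncK) */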
#if 1
#define hypre_fence()
/*printf("\n hypre_newBoxLoop in %s(%d) function %s\n",__FILE__,__LINE__,__FUNCTION__);*/
#else
#define hypre_fence() \
{ \
cudaError err = cudaGetLastError(); \
if ( cudaSuccess != err ) \
{ \
printf("\n ERROR hypre_newBoxLoop: %s in %s(%d) function %s\n",cudaGetErrorString(err),__FILE__,__LINE__,__FUNCTION__); \
/* HYPRE_Int *p = NULL; *p = 1; */ \
} \
hypre_CheckErrorDevice(cudaDeviceSynchronize()); \
}
#endif
/* #define hypre_reduce_policy cuda_reduce<BLOCKSIZE> */
extern "C++" {
template <typename LOOP_BODY>
__global__ void forall_kernel(LOOP_BODY loop_body, HYPRE_Int length)
{
HYPRE_Int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < length)
{
loop_body(idx);
}
}
template<typename LOOP_BODY>
void BoxLoopforall(HYPRE_Int policy, HYPRE_Int length, LOOP_BODY loop_body)
{
if (policy == HYPRE_MEMORY_HOST)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (HYPRE_Int idx = 0; idx < length; idx++)
{
loop_body(idx);
}
}
else if (policy == HYPRE_MEMORY_DEVICE)
{
HYPRE_Int gridSize = (length + BLOCKSIZE - 1) / BLOCKSIZE;
if (gridSize == 0)
{
gridSize = 1;
}
forall_kernel<<<gridSize, BLOCKSIZE>>>(loop_body, length);
}
else if (policy == 2)
{
}
}
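/* hedged usage sketch: the BoxLoopNBegin/End macros below expand roughly to
 *
 *   hypre_newBoxLoopInit(ndim, loop_size);   // hypre__tot = prod(loop_size)
 *   hypre_BoxLoopDataDeclareK(1, ...);       // capture per-box metadata
 *   BoxLoopforall(hypre_exec_policy, hypre__tot,
 *                 HYPRE_LAMBDA (HYPRE_Int idx) { <loop body> });
 *
 * so one loop body runs either as an OpenMP loop on the host or as a CUDA
 * kernel on the device, selected at run time by hypre_exec_policy */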
template <typename LOOP_BODY>
__global__ void reductionforall_kernel(LOOP_BODY ReductionLoop,
HYPRE_Int length)
{
ReductionLoop(blockDim.x*blockIdx.x+threadIdx.x, blockDim.x*gridDim.x, length);
}
template<typename LOOP_BODY>
void ReductionBoxLoopforall(HYPRE_Int policy, HYPRE_Int length, LOOP_BODY ReductionLoop)
{
if (length <= 0)
{
return;
}
if (policy == HYPRE_MEMORY_HOST)
{
}
else if (policy == HYPRE_MEMORY_DEVICE)
{
HYPRE_Int gridSize = (length + BLOCKSIZE - 1) / BLOCKSIZE;
gridSize = hypre_min(gridSize, 1024);
/*
hypre_printf("length= %d, blocksize = %d, gridsize = %d\n",
length, BLOCKSIZE, gridSize);
*/
reductionforall_kernel<<<gridSize, BLOCKSIZE>>>(ReductionLoop, length);
}
}
}
#define hypre_BoxLoopIncK(k,box,hypre__i) \
HYPRE_Int hypre_boxD##k = 1; \
HYPRE_Int hypre__i = 0; \
hypre__i += (hypre_IndexD(local_idx, 0)*box.strides0 + box.bstart0) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize0 + 1); \
hypre__i += (hypre_IndexD(local_idx, 1)*box.strides1 + box.bstart1) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize1 + 1); \
hypre__i += (hypre_IndexD(local_idx, 2)*box.strides2 + box.bstart2) * hypre_boxD##k; \
hypre_boxD##k *= hypre_max(0, box.bsize2 + 1);
#define hypre_newBoxLoopInit(ndim,loop_size) \
HYPRE_Int hypre__tot = 1; \
for (HYPRE_Int hypre_d = 0;hypre_d < ndim;hypre_d ++) \
hypre__tot *= loop_size[hypre_d];
#define hypre_BasicBoxLoopInit(ndim,loop_size) \
HYPRE_Int hypre__tot = 1; \
for (HYPRE_Int hypre_d = 0;hypre_d < ndim;hypre_d ++) \
hypre__tot *= loop_size[hypre_d];
#define hypre_newBoxLoopDeclare(box) \
hypre_Index local_idx; \
HYPRE_Int idx_local = idx; \
hypre_IndexD(local_idx, 0) = idx_local % box.lsize0; \
idx_local = idx_local / box.lsize0; \
hypre_IndexD(local_idx, 1) = idx_local % box.lsize1; \
idx_local = idx_local / box.lsize1; \
hypre_IndexD(local_idx, 2) = idx_local % box.lsize2;
#define hypre_newBoxLoop0Begin(ndim, loop_size) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{
#define hypre_newBoxLoop0End() \
}); \
hypre_fence(); \
}
#define hypre_BoxLoopDataDeclareK(k,ndim,loop_size,dbox,start,stride) \
hypre_Boxloop databox##k; \
databox##k.lsize0 = loop_size[0]; \
databox##k.strides0 = stride[0]; \
databox##k.bstart0 = start[0] - dbox->imin[0]; \
databox##k.bsize0 = dbox->imax[0]-dbox->imin[0]; \
if (ndim > 1) \
{ \
databox##k.lsize1 = loop_size[1]; \
databox##k.strides1 = stride[1]; \
databox##k.bstart1 = start[1] - dbox->imin[1]; \
databox##k.bsize1 = dbox->imax[1]-dbox->imin[1]; \
} \
else \
{ \
databox##k.lsize1 = 1; \
databox##k.strides1 = 0; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
if (ndim == 3) \
{ \
databox##k.lsize2 = loop_size[2]; \
databox##k.strides2 = stride[2]; \
databox##k.bstart2 = start[2] - dbox->imin[2]; \
databox##k.bsize2 = dbox->imax[2]-dbox->imin[2]; \
} \
else \
{ \
databox##k.lsize2 = 1; \
databox##k.strides2 = 0; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
}
#define hypre_newBoxLoop1Begin(ndim, loop_size, \
dbox1, start1, stride1, i1) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1);
#define hypre_newBoxLoop1End(i1) \
}); \
hypre_fence(); \
}
#define hypre_newBoxLoop2Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2);
#define hypre_newBoxLoop2End(i1, i2) \
}); \
hypre_fence(); \
}
#define hypre_newBoxLoop3Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
hypre_BoxLoopDataDeclareK(3,ndim,loop_size,dbox3,start3,stride3); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2); \
hypre_BoxLoopIncK(3,databox3,i3);
#define hypre_newBoxLoop3End(i1, i2,i3) \
}); \
hypre_fence(); \
}
#define hypre_newBoxLoop4Begin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
dbox3, start3, stride3, i3, \
dbox4, start4, stride4, i4) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
hypre_BoxLoopDataDeclareK(3,ndim,loop_size,dbox3,start3,stride3); \
hypre_BoxLoopDataDeclareK(4,ndim,loop_size,dbox4,start4,stride4); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2); \
hypre_BoxLoopIncK(3,databox3,i3); \
hypre_BoxLoopIncK(4,databox4,i4);
#define hypre_newBoxLoop4End(i1, i2, i3, i4) \
}); \
hypre_fence(); \
}
#define zypre_BasicBoxLoopDataDeclareK(k,ndim,loop_size,stride) \
hypre_Boxloop databox##k; \
databox##k.lsize0 = loop_size[0]; \
databox##k.strides0 = stride[0]; \
databox##k.bstart0 = 0; \
databox##k.bsize0 = 0; \
if (ndim > 1) \
{ \
databox##k.lsize1 = loop_size[1]; \
databox##k.strides1 = stride[1]; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
else \
{ \
databox##k.lsize1 = 1; \
databox##k.strides1 = 0; \
databox##k.bstart1 = 0; \
databox##k.bsize1 = 0; \
} \
if (ndim == 3) \
{ \
databox##k.lsize2 = loop_size[2]; \
databox##k.strides2 = stride[2]; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
} \
else \
{ \
databox##k.lsize2 = 1; \
databox##k.strides2 = 0; \
databox##k.bstart2 = 0; \
databox##k.bsize2 = 0; \
}
#define zypre_newBasicBoxLoop1Begin(ndim, loop_size, \
stride1, i1) \
{ \
hypre_BasicBoxLoopInit(ndim,loop_size); \
zypre_BasicBoxLoopDataDeclareK(1,ndim,loop_size,stride1); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1);
#define zypre_newBasicBoxLoop2Begin(ndim, loop_size, \
stride1, i1, \
stride2, i2) \
{ \
hypre_BasicBoxLoopInit(ndim,loop_size); \
zypre_BasicBoxLoopDataDeclareK(1,ndim,loop_size,stride1); \
zypre_BasicBoxLoopDataDeclareK(2,ndim,loop_size,stride2); \
BoxLoopforall(hypre_exec_policy,hypre__tot,HYPRE_LAMBDA (HYPRE_Int idx) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2);
#define hypre_LoopBegin(size,idx) \
{ \
BoxLoopforall(hypre_exec_policy,size,HYPRE_LAMBDA (HYPRE_Int idx) \
{
#define hypre_LoopEnd() \
}); \
hypre_fence(); \
}
#define hypre_newBoxLoopGetIndex(index) \
index[0] = hypre_IndexD(local_idx, 0); index[1] = hypre_IndexD(local_idx, 1); index[2] = hypre_IndexD(local_idx, 2);
#define hypre_BoxLoopGetIndex zypre_BoxLoopGetIndex
#define hypre_BoxLoopSetOneBlock() ;
#define hypre_BoxLoopBlock() 0
#define hypre_BoxLoop0Begin hypre_newBoxLoop0Begin
#define hypre_BoxLoop0For hypre_newBoxLoop0For
#define hypre_BoxLoop0End hypre_newBoxLoop0End
#define hypre_BoxLoop1Begin hypre_newBoxLoop1Begin
#define hypre_BoxLoop1For hypre_newBoxLoop1For
#define hypre_BoxLoop1End hypre_newBoxLoop1End
#define hypre_BoxLoop2Begin hypre_newBoxLoop2Begin
#define hypre_BoxLoop2For hypre_newBoxLoop2For
#define hypre_BoxLoop2End hypre_newBoxLoop2End
#define hypre_BoxLoop3Begin hypre_newBoxLoop3Begin
#define hypre_BoxLoop3For hypre_newBoxLoop3For
#define hypre_BoxLoop3End hypre_newBoxLoop3End
#define hypre_BoxLoop4Begin hypre_newBoxLoop4Begin
#define hypre_BoxLoop4For hypre_newBoxLoop4For
#define hypre_BoxLoop4End hypre_newBoxLoop4End
#define hypre_BasicBoxLoop1Begin zypre_newBasicBoxLoop1Begin
#define hypre_BasicBoxLoop2Begin zypre_newBasicBoxLoop2Begin
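/* Illustrative usage (a sketch, not code from this header): a typical
   two-box loop copies data between boxes. The names loop_size, x_data_box,
   y_data_box, start, stride, xp and yp are assumptions for the example.

   hypre_BoxLoop2Begin(ndim, loop_size,
                       x_data_box, start, stride, xi,
                       y_data_box, start, stride, yi);
   {
      yp[yi] = xp[xi];
   }
   hypre_BoxLoop2End(xi, yi);

   The Begin macro computes the flat iteration count, builds one hypre_Boxloop
   descriptor per data box and launches the device lambda; the End macro
   closes the lambda and issues hypre_fence(). */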
/* Reduction BoxLoop1 */
#define hypre_BoxLoop1ReductionBegin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
reducesum) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
reducesum.nblocks = hypre_min( (hypre__tot+BLOCKSIZE-1)/BLOCKSIZE, 1024 ); \
ReductionBoxLoopforall(hypre_exec_policy, hypre__tot, \
HYPRE_LAMBDA (HYPRE_Int tid, HYPRE_Int nthreads, \
HYPRE_Int len) \
{ \
for (HYPRE_Int idx = tid; \
idx < len; \
idx += nthreads) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1);
#define hypre_BoxLoop1ReductionEnd(i1, reducesum) \
} \
reducesum.BlockReduce(); \
}); \
hypre_fence(); \
}
/* Reduction BoxLoop2 */
#define hypre_BoxLoop2ReductionBegin(ndim, loop_size, \
dbox1, start1, stride1, i1, \
dbox2, start2, stride2, i2, \
reducesum) \
{ \
hypre_newBoxLoopInit(ndim,loop_size); \
hypre_BoxLoopDataDeclareK(1,ndim,loop_size,dbox1,start1,stride1); \
hypre_BoxLoopDataDeclareK(2,ndim,loop_size,dbox2,start2,stride2); \
reducesum.nblocks = hypre_min( (hypre__tot+BLOCKSIZE-1)/BLOCKSIZE, 1024 ); \
ReductionBoxLoopforall(hypre_exec_policy, hypre__tot, \
HYPRE_LAMBDA (HYPRE_Int tid, HYPRE_Int nthreads, \
HYPRE_Int len) \
{ \
for (HYPRE_Int idx = tid; \
idx < len; \
idx += nthreads) \
{ \
hypre_newBoxLoopDeclare(databox1); \
hypre_BoxLoopIncK(1,databox1,i1); \
hypre_BoxLoopIncK(2,databox2,i2);
#define hypre_BoxLoop2ReductionEnd(i1, i2, reducesum) \
} \
reducesum.BlockReduce(); \
}); \
hypre_fence(); \
}
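/* Illustrative usage of a reduction loop (a sketch; the reducer type name
   and the data names are assumptions, not definitions from this header):

   ReduceSum<HYPRE_Real> sum(0.0);
   hypre_BoxLoop1ReductionBegin(ndim, loop_size,
                                x_data_box, start, stride, xi,
                                sum)
   {
      sum += xp[xi] * xp[xi];
   }
   hypre_BoxLoop1ReductionEnd(xi, sum)

   Each thread strides over the flattened index space accumulating into the
   reducer, and BlockReduce() combines the per-block partial sums. */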
#endif
|
racf_fmt_plug.c | /* RACF cracker patch for JtR. Hacked together during March of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com> .
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Thanks to Nigel Pentland <nigel at nigelpentland.net>, author of CRACF for
* providing algorithm details.
*
* Thanks to Main Framed <mainframed767 at gmail.com> for providing test vectors,
* algorithm details and requesting the RACF cracker in the first place.
*
* racfdump format => userid:$racf$*userid*deshash
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_racf;
#elif FMT_REGISTERS_H
john_register_one(&fmt_racf);
#else
#include <openssl/des.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "crc32.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // tuned K8-dual HT
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define FORMAT_LABEL "RACF"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "DES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 8
#define CIPHERTEXT_LENGTH 16
#define BINARY_SIZE 8
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static const unsigned char a2e[256] = {
0, 1, 2, 3, 55, 45, 46, 47, 22, 5, 37, 11, 12, 13, 14, 15,
16, 17, 18, 19, 60, 61, 50, 38, 24, 25, 63, 39, 28, 29, 30, 31,
64, 79,127,123, 91,108, 80,125, 77, 93, 92, 78,107, 96, 75, 97,
240,241,242,243,244,245,246,247,248,249,122, 94, 76,126,110,111,
124,193,194,195,196,197,198,199,200,201,209,210,211,212,213,214,
215,216,217,226,227,228,229,230,231,232,233, 74,224, 90, 95,109,
121,129,130,131,132,133,134,135,136,137,145,146,147,148,149,150,
151,152,153,162,163,164,165,166,167,168,169,192,106,208,161, 7,
32, 33, 34, 35, 36, 21, 6, 23, 40, 41, 42, 43, 44, 9, 10, 27,
48, 49, 26, 51, 52, 53, 54, 8, 56, 57, 58, 59, 4, 20, 62,225,
65, 66, 67, 68, 69, 70, 71, 72, 73, 81, 82, 83, 84, 85, 86, 87,
88, 89, 98, 99,100,101,102,103,104,105,112,113,114,115,116,117,
118,119,120,128,138,139,140,141,142,143,144,154,155,156,157,158,
159,160,170,171,172,173,174,175,176,177,178,179,180,181,182,183,
184,185,186,187,188,189,190,191,202,203,204,205,206,207,218,219,
220,221,222,223,234,235,236,237,238,239,250,251,252,253,254,255
};
/* This is a2e[] with each entry XORed with 0x55, left-shifted one bit,
and finally given odd parity so that DES_set_key_unchecked
can be used directly. This provides about a 15% speed-up. */
static const unsigned char a2e_precomputed[256] = {
171, 168, 174, 173, 196, 241, 247, 244, 134, 161, 224, 188, 179, 176, 182, 181,
138, 137, 143, 140, 211, 208, 206, 230, 155, 152, 213, 229, 146, 145, 151, 148,
42, 52, 84, 93, 28, 115, 11, 81, 49, 16, 19, 55, 124, 107, 61, 104,
74, 73, 79, 76, 67, 64, 70, 69, 91, 88, 94, 22, 50, 87, 118, 117,
82, 41, 47, 44, 35, 32, 38, 37, 59, 56, 8, 14, 13, 2, 1, 7,
4, 26, 25, 110, 109, 98, 97, 103, 100, 122, 121, 62, 107, 31, 21, 112,
88, 168, 174, 173, 162, 161, 167, 164, 186, 185, 137, 143, 140, 131, 128, 134,
133, 155, 152, 239, 236, 227, 224, 230, 229, 251, 248, 42, 127, 11, 233, 164,
234, 233, 239, 236, 227, 128, 167, 133, 251, 248, 254, 253, 242, 185, 191, 157,
203, 200, 158, 205, 194, 193, 199, 186, 218, 217, 223, 220, 162, 131, 214, 104,
41, 47, 44, 35, 32, 38, 37, 59, 56, 8, 14, 13, 2, 1, 7, 4,
26, 25, 110, 109, 98, 97, 103, 100, 122, 121, 74, 73, 79, 76, 67, 64,
70, 69, 91, 171, 191, 188, 179, 176, 182, 181, 138, 158, 157, 146, 145, 151,
148, 234, 254, 253, 242, 241, 247, 244, 203, 200, 206, 205, 194, 193, 199, 196,
218, 217, 223, 220, 211, 208, 214, 213, 62, 61, 50, 49, 55, 52, 31, 28,
19, 16, 22, 21, 127, 124, 115, 112, 118, 117, 94, 93, 82, 81, 87, 84
};
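#ifdef RACF_DEBUG
/* illustrative sanity check (not part of the original patch): verify that
 * each a2e_precomputed entry matches the construction described above,
 * i.e. ((a2e[c] ^ 0x55) << 1) with the low bit set to give odd parity */
static void verify_a2e_precomputed(void)
{
	int c, b;
	for (c = 0; c < 256; c++) {
		unsigned char v = (unsigned char)((a2e[c] ^ 0x55) << 1);
		int ones = 0;
		for (b = 1; b < 8; b++) /* count data bits; bit 0 is the parity bit */
			ones += (v >> b) & 1;
		if (!(ones & 1)) /* even so far -> set parity bit to make it odd */
			v |= 1;
		if (v != a2e_precomputed[c])
			printf("a2e_precomputed mismatch at index %d\n", c);
	}
}
#endif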
/* in-place ascii2ebcdic conversion */
static void ascii2ebcdic(unsigned char *str)
{
int i;
int n = strlen((const char*)str);
for (i = 0; i < n; ++i)
str[i] = a2e[str[i]];
}
/* replace missing characters in userid by EBCDIC spaces (0x40) */
static void process_userid(unsigned char *str)
{
int i;
for (i = strlen((const char*)str); i < 8; ++i)
str[i] = 0x40;
str[8] = 0; /* terminate string */
}
#ifdef RACF_DEBUG
static void print_hex(unsigned char *str, int len)
{
int i;
for (i = 0; i < len; ++i)
printf("%02x", str[i]);
printf("\n");
}
#endif
static struct fmt_tests racf_tests[] = {
{"$racf$*AAAAAAA*CA2E330B2FD1820E", "AAAAAAAA"},
{"$racf$*AAAAAAAA*062314297C496E0E", "AAAAAAAA"},
{"$racf$*JJJJJJJJ*8B5F0B1D0826D927", "TESTTEST"},
{"$racf$*TTTTTTTT*424B258AF8B9061B", "TESTTEST"},
{"$racf$*A*0F7DE80335E8ED68", "A"},
{"$racf$*OPEN3*EC76FC0DEF5B0A83", "SYS1"},
{"$racf$*TESTTEST*0FF48804F759193F", "TESTTEST"},
{"$racf$*SYSOPR*83845F8EEC7C20D8", "SYSOPR"},
{"$racf$*TCPIP*657889CD0F5D40DF", "SYS1"},
{"$racf$*TESTER*E05AB770EA048421", "TEST"},
{NULL}
};
static struct custom_salt {
unsigned char userid[8 + 1];
} *cur_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static DES_key_schedule (*schedules);
static int dirty;
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
schedules = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*schedules));
}
static void done(void)
{
MEM_FREE(schedules);
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy;
char *keeptr;
char *p;
if (strncmp(ciphertext, "$racf$*", 7))
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += 7;
p = strtokm(ctcopy, "*"); /* username */
if(!p)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* hash */
goto err;
if (hexlenu(p) != CIPHERTEXT_LENGTH)
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy, *username;
static struct custom_salt cs;
ctcopy += 7; /* skip over "$racf$*" */
username = strtokm(ctcopy, "*");
/* process username */
strncpy((char*)cs.userid, username, 8);
cs.userid[8] = 0; // terminate username at 8 bytes
ascii2ebcdic(cs.userid);
process_userid(cs.userid);
#ifdef RACF_DEBUG
printf("userid in EBCDIC : ");
print_hex(cs.userid, 8);
#endif
MEM_FREE(keeptr);
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
p = strrchr(ciphertext, '*') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
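/* Note on the "dirty" flag used below: a DES key schedule depends only on
   the candidate key, not on the salt, so schedules are cached across salts.
   DES_set_key_unchecked() is re-run only after racf_set_key() has changed a
   candidate, instead of once per key/salt combination. */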
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
if (dirty) {
DES_cblock des_key;
int i;
/* process key */
for(i = 0; saved_key[index][i]; i++)
des_key[i] = a2e_precomputed[ARCH_INDEX(saved_key[index][i])];
/* replace missing characters in userid by (EBCDIC space (0x40) XOR 0x55) << 1 */
while(i < 8)
des_key[i++] = 0x2a;
DES_set_key_unchecked(&des_key, &schedules[index]);
}
/* do encryption */
DES_ecb_encrypt((const_DES_cblock*)cur_salt->userid, (DES_cblock*)crypt_out[index], &schedules[index], DES_ENCRYPT);
}
dirty = 0;
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void racf_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > 8)
saved_len = 8;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
dirty = 1;
}
static char *get_key(int index)
{
return saved_key[index];
}
struct fmt_main fmt_racf = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_TRUNC | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
{ NULL },
racf_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
racf_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_unaryop__ainv_fp32_int32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_fp32_int32
// op(A') function: GB_tran__ainv_fp32_int32
// C type: float
// A type: int32_t
// cast: float cij = (float) aij
// unaryop: cij = -aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
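// For this operator, GB_CAST_OP (p, p) expands (per entry p) to:
//     int32_t aij = Ax [p] ;      // GB_GETA
//     float x = (float) aij ;     // GB_CASTING
//     Cx [p] = -x ;               // GB_OP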
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FP32 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_fp32_int32
(
float *restrict Cx,
const int32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_fp32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
DRB011-minusminus-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The -- operation on numNodes2 is not protected, causing a data race.
Data race pair: numNodes2@74:7 vs. numNodes2@74:7
*/
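/* A race-free variant (illustrative only; the benchmark intentionally keeps
   the unprotected form below) would declare the decrement as a reduction:

     #pragma omp parallel for reduction(-:numNodes2)
     for (i = numNodes - 1; i > -1; --i)
       if (x[i] <= 0)
         numNodes2--;
*/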
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
int main(int argc, char* argv[])
{
omprace_init();
int i;
int len=100;
int numNodes=len, numNodes2=0;
int x[100];
// initialize x[]
for (i=0; i< len; i++)
{
if (i%2==0)
x[i]=5;
else
x[i]= -5;
}
#pragma omp parallel for
for (i=numNodes-1 ; i>-1 ; --i) {
if (x[i]<=0) {
numNodes2-- ;
}
}
printf ("numNodes2 = %d\n", numNodes2);
omprace_fini();
return 0;
}
|
edge_data_c2c.h | /*
==============================================================================
KratosPFEMApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: antonia $
// Date: $Date: 2009-01-14 08:26:51 $
// Revision: $Revision: 1.11 $
//
//
#if !defined(KRATOS_EDGE_DATA_C2C_H_INCLUDED )
#define KRATOS_EDGE_DATA_C2C_H_INCLUDED
//we suggest defining the following macro
#define USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
//we suggest defining the following macro
// #define USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
//#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "incompressible_fluid_application.h"
#include "utilities/openmp_utils.h"
namespace Kratos
{
// template<unsigned int TDim>
// class EdgeConstructionScratch
// {
// public:
// array_1d<double, TDim+1> N;
// boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim> dN_dx;
// double volume;
// double weighting_factor = 1.0 / static_cast<double>(TDim+1);
// boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> mass_consistent;
// array_1d<double, TDim+1> mass_lumped;
// array_1d<unsigned int, TDim+1> nodal_indices;
// array_1d<double, TDim+1> heights;
//
// }
//structure definition for fast access to edge data using CSR format
template<unsigned int TDim>
class EdgesStructureTypeC2C
{
public:
//component ij of the consistent mass matrix (M = Ni * Nj * dOmega)
double Mass;
//components kl of the laplacian matrix of edge ij (L = dNi/dxk * dNj/dxl * dOmega)
//double Laplacian;
boost::numeric::ublas::bounded_matrix<double, TDim, TDim> LaplacianIJ;
//components k of the gradient matrix of edge ij (G = Ni * dNj/dxl * dOmega)
array_1d<double, TDim> Ni_DNj;
//components k of the transposed gradient matrix of edge ij (GT = dNi/dxl * Nj * dOmega)
//TRANSPOSED GRADIENT
array_1d<double, TDim> DNi_Nj;
//*************************************************************************************
//*************************************************************************************
//gradient integrated by parts
//RHSi += DNi_Nj pj + Aboundary * pext ==> RHS += Ni_DNj p_j - DNi_Nj p_i
//ATTENTION: + Aboundary * pext is NOT included!! it should be included "manually"
inline void Add_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
{
for (unsigned int comp = 0; comp < TDim; comp++)
destination[comp] -= Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
}
inline void Sub_Gp(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
{
for (unsigned int comp = 0; comp < TDim; comp++)
destination[comp] += Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
}
//*************************************************************************************
//*************************************************************************************
//gradient
//RHSi += Ni_DNj[k]*v[k]
inline void Add_D_v(double& destination,
const array_1d<double, TDim>& v_i,
const array_1d<double, TDim>& v_j)
{
for (unsigned int comp = 0; comp < TDim; comp++)
destination += Ni_DNj[comp] * (v_j[comp] - v_i[comp]);
}
inline void Sub_D_v(double& destination,
const array_1d<double, TDim>& v_i,
const array_1d<double, TDim>& v_j)
{
for (unsigned int comp = 0; comp < TDim; comp++)
destination -= Ni_DNj[comp] * (v_j[comp] - v_i[comp]);
}
//*************************************************************************************
//*************************************************************************************
//gradient
//RHSi += Ni_DNj pj
inline void Add_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
{
for (unsigned int comp = 0; comp < TDim; comp++)
destination[comp] += Ni_DNj[comp] * (p_j - p_i);
}
inline void Sub_grad_p(array_1d<double, TDim>& destination, const double& p_i, const double& p_j)
{
for (unsigned int comp = 0; comp < TDim; comp++)
destination[comp] -= Ni_DNj[comp] * (p_j - p_i);
}
//*************************************************************************************
//*************************************************************************************
//gradient
//RHSi += DNi_Nj[k]*v[k]
inline void Add_div_v(double& destination,
const array_1d<double, TDim>& v_i,
const array_1d<double, TDim>& v_j)
{
for (unsigned int comp = 0; comp < TDim; comp++)
destination -= Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp];
}
inline void Sub_div_v(double& destination,
const array_1d<double, TDim>& v_i,
const array_1d<double, TDim>& v_j)
{
for (unsigned int comp = 0; comp < TDim; comp++)
destination += Ni_DNj[comp] * v_j[comp] - DNi_Nj[comp] * v_i[comp];
}
//*************************************************************************************
//*************************************************************************************
//gets the trace of the laplacian matrix
inline void CalculateScalarLaplacian(double& l_ij)
{
l_ij = LaplacianIJ(0, 0);
for (unsigned int comp = 1; comp < TDim; comp++)
l_ij += LaplacianIJ(comp, comp);
}
inline void Add_ConvectiveContribution(array_1d<double, TDim>& destination,
const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
{
// #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
// double temp = a_i[0] * Ni_DNj[0];
// for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
// temp += a_i[k_comp] * Ni_DNj[k_comp];
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// destination[l_comp] += temp * (U_j[l_comp] - U_i[l_comp]);
// #else
// double aux_i = a_i[0] * Ni_DNj[0];
// double aux_j = a_j[0] * Ni_DNj[0];
// for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
// {
// aux_i += a_i[k_comp] * Ni_DNj[k_comp];
// aux_j += a_j[k_comp] * Ni_DNj[k_comp];
// }
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// destination[l_comp] += aux_j * U_j[l_comp] - aux_i * U_i[l_comp];
// #endif
// for (unsigned int comp = 0; comp < TDim; comp++)
// destination[comp] -= Ni_DNj[comp] * p_j - DNi_Nj[comp] * p_i;
double second = a_i[0] * DNi_Nj[0];
double first = a_j[0] * Ni_DNj[0];
for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
{
second += a_i[k_comp] * DNi_Nj[k_comp];
first += a_j[k_comp] * Ni_DNj[k_comp];
}
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
destination[l_comp] += first * U_j[l_comp] - second * U_i[l_comp];
}
inline void Sub_ConvectiveContribution(array_1d<double, TDim>& destination,
const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
{
// #ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
// double temp = a_i[0] * Ni_DNj[0];
// for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
// temp += a_i[k_comp] * Ni_DNj[k_comp];
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// destination[l_comp] -= temp * (U_j[l_comp] - U_i[l_comp]);
// #else
// double aux_i = a_i[0] * Ni_DNj[0];
// double aux_j = a_j[0] * Ni_DNj[0];
// for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
// {
// aux_i += a_i[k_comp] * Ni_DNj[k_comp];
// aux_j += a_j[k_comp] * Ni_DNj[k_comp];
// }
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// destination[l_comp] -= aux_j * U_j[l_comp] - aux_i * U_i[l_comp];
// #endif
double second = a_i[0] * DNi_Nj[0];
double first = a_j[0] * Ni_DNj[0];
for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
{
second += a_i[k_comp] * DNi_Nj[k_comp];
first += a_j[k_comp] * Ni_DNj[k_comp];
}
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
destination[l_comp] -= first * U_j[l_comp] - second * U_i[l_comp];
}
inline void Sub_ConvectiveContribution(double& destination,
const array_1d<double, TDim>& a_i, const double& phi_i,
const array_1d<double, TDim>& a_j, const double& phi_j)
{
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
double temp = a_i[0] * Ni_DNj[0];
for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
temp += a_i[k_comp] * Ni_DNj[k_comp];
destination -= temp * (phi_j - phi_i);
#else
double aux_i = a_i[0] * Ni_DNj[0];
double aux_j = a_j[0] * Ni_DNj[0];
for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
{
aux_i += a_i[k_comp] * Ni_DNj[k_comp];
aux_j += a_j[k_comp] * Ni_DNj[k_comp];
}
destination -= aux_j * phi_j - aux_i * phi_i;
#endif
// double second = a_i[0] * DNi_Nj[0];
// double first = a_j[0] * Ni_DNj[0];
// for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
// {
// second += a_i[k_comp] * DNi_Nj[k_comp];
// first += a_j[k_comp] * Ni_DNj[k_comp];
// }
// destination -= first * phi_j - second * phi_i;
}
inline void Add_ConvectiveContribution(double& destination,
const array_1d<double, TDim>& a_i, const double& phi_i,
const array_1d<double, TDim>& a_j, const double& phi_j)
{
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
double temp = a_i[0] * Ni_DNj[0];
for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
temp += a_i[k_comp] * Ni_DNj[k_comp];
destination += temp * (phi_j - phi_i);
#else
double aux_i = a_i[0] * Ni_DNj[0];
double aux_j = a_j[0] * Ni_DNj[0];
for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
{
aux_i += a_i[k_comp] * Ni_DNj[k_comp];
aux_j += a_j[k_comp] * Ni_DNj[k_comp];
}
destination += aux_j * phi_j - aux_i * phi_i;
#endif
// double second = a_i[0] * DNi_Nj[0];
// double first = a_j[0] * Ni_DNj[0];
// for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
// {
// second += a_i[k_comp] * DNi_Nj[k_comp];
// first += a_j[k_comp] * Ni_DNj[k_comp];
// }
// destination += first * phi_j - second * phi_i;
}
//*************************************************************************************
//*************************************************************************************
inline void CalculateConvectionStabilization_LOW(array_1d<double, TDim>& stab_low,
const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& U_i,
const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& U_j)
{
double conv_stab = 0.0;
for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp);
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]);
// double temp = 0.0;
// double lij = 0.0;
// for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
// {
// lij += LaplacianIJ(k_comp,k_comp);
// temp = a_i[k_comp] * a_i[k_comp];
// }
//
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// stab_low[l_comp] = temp * lij * (U_j[l_comp] - U_i[l_comp]);
}
// inline void CalculateConvectionStabilization_LOW( array_1d<double,TDim>& stab_low,
// const array_1d<double,TDim>& a_i, const array_1d<double,TDim>& U_i, const double& p_i,
// const array_1d<double,TDim>& a_j, const array_1d<double,TDim>& U_j, const double& p_j
// )
// {
// double conv_stab = 0.0;
// for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
// for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
// conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp,m_comp);
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// stab_low[l_comp] = conv_stab * (U_j[l_comp] - U_i[l_comp]);
//
//// adding pressure
// double press_diff = p_j-p_i;
// for (unsigned int j_comp = 0; j_comp < TDim; j_comp++)
// {
// for (unsigned int i_comp = 0; i_comp < TDim; i_comp++)
// stab_low[j_comp] -= a_i[i_comp] * LaplacianIJ(i_comp,j_comp) * press_diff ;
// }
//
//
// }
inline void CalculateConvectionStabilization_LOW(double& stab_low,
const array_1d<double, TDim>& a_i, const double& phi_i,
const array_1d<double, TDim>& a_j, const double& phi_j)
{
double conv_stab = 0.0;
for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
for (unsigned int m_comp = 0; m_comp < TDim; m_comp++)
conv_stab += a_i[k_comp] * a_i[m_comp] * LaplacianIJ(k_comp, m_comp);
stab_low = conv_stab * (phi_j - phi_i);
}
//*************************************************************************************
//*************************************************************************************
inline void CalculateConvectionStabilization_HIGH(array_1d<double, TDim>& stab_high,
const array_1d<double, TDim>& a_i, const array_1d<double, TDim>& pi_i,
const array_1d<double, TDim>& a_j, const array_1d<double, TDim>& pi_j)
{
#ifdef USE_CONSERVATIVE_FORM_FOR_VECTOR_CONVECTION
double temp = 0.0;
for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
temp += a_i[k_comp] * Ni_DNj[k_comp];
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
stab_high[l_comp] = -temp * (pi_j[l_comp] - pi_i[l_comp]); //check if the minus sign is correct
// double temp_i = 0.0;
// double temp_j = 0.0;
// for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
// {
// temp_j += a_i[k_comp] * Ni_DNj[k_comp];
// temp_i += a_i[k_comp] * DNi_Nj[k_comp];
// }
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// stab_high[l_comp] = +(temp_j*pi_j[l_comp] - temp_i*pi_i[l_comp]); //check if the minus sign is correct
// double temp_i = 0.0;
// double temp_j = 0.0;
// for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
// {
// temp_i += a_i[k_comp] * Ni_DNj[k_comp];
// temp_j += a_i[k_comp] * DNi_Nj[k_comp];
// }
// for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
// stab_high[l_comp] = (temp_j*pi_j[l_comp] + temp_i*pi_i[l_comp]); //check if the minus sign is correct
#else
double aux_i = a_i[0] * Ni_DNj[0];
double aux_j = a_j[0] * Ni_DNj[0];
for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
{
aux_i += a_i[k_comp] * Ni_DNj[k_comp];
aux_j += a_j[k_comp] * Ni_DNj[k_comp];
}
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
stab_high[l_comp] = -(aux_j * pi_j[l_comp] - aux_i * pi_i[l_comp]);
#endif
}
inline void CalculateConvectionStabilization_HIGH(double& stab_high,
const array_1d<double, TDim>& a_i, const double& pi_i,
const array_1d<double, TDim>& a_j, const double& pi_j)
{
#ifdef USE_CONSERVATIVE_FORM_FOR_SCALAR_CONVECTION
double temp = 0.0;
for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
temp += a_i[k_comp] * Ni_DNj[k_comp];
stab_high = -temp * (pi_j - pi_i); //check if the minus sign is correct
#else
double aux_i = a_i[0] * Ni_DNj[0];
double aux_j = a_j[0] * Ni_DNj[0];
for (unsigned int k_comp = 1; k_comp < TDim; k_comp++)
{
aux_i += a_i[k_comp] * Ni_DNj[k_comp];
aux_j += a_j[k_comp] * Ni_DNj[k_comp];
}
stab_high = -(aux_j * pi_j - aux_i * pi_i);
#endif
}
//*************************************************************************************
//*************************************************************************************
inline void Add_StabContribution(array_1d<double, TDim>& destination,
const double tau, const double beta,
const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high)
{
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
destination[l_comp] += tau * (stab_low[l_comp] - beta * stab_high[l_comp]);
}
inline void Add_StabContribution(double& destination,
const double tau, const double beta,
const double& stab_low, const double& stab_high)
{
destination += tau * (stab_low - beta * stab_high);
}
inline void Sub_StabContribution(array_1d<double, TDim>& destination,
const double tau, const double beta,
const array_1d<double, TDim>& stab_low, const array_1d<double, TDim>& stab_high)
{
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
destination[l_comp] -= tau * (stab_low[l_comp] - beta * stab_high[l_comp]);
}
inline void Sub_StabContribution(double& destination,
const double tau, const double beta,
const double& stab_low, const double& stab_high)
{
destination -= tau * (stab_low - beta * stab_high);
}
//*************************************************************************************
//*************************************************************************************
inline void Add_ViscousContribution(array_1d<double, TDim>& destination,
const array_1d<double, TDim>& U_i, const double& nu_i,
const array_1d<double, TDim>& U_j, const double& nu_j)
{
//calculate scalar laplacian
double L = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
L += LaplacianIJ(l_comp, l_comp);
//double nu_avg = 0.5*(nu_i+nu_j);
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
destination[l_comp] += nu_i * L * (U_j[l_comp] - U_i[l_comp]);
}
inline void Sub_ViscousContribution(array_1d<double, TDim>& destination,
const array_1d<double, TDim>& U_i, const double& nu_i,
const array_1d<double, TDim>& U_j, const double& nu_j)
{
//calculate scalar laplacian
double L = 0.0;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
L += LaplacianIJ(l_comp, l_comp);
//double nu_avg = 0.5*(nu_i+nu_j);
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
destination[l_comp] -= nu_i * L * (U_j[l_comp] - U_i[l_comp]);
}
};
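// illustrative CSR traversal (a sketch, not part of the original code):
// with the MatrixContainerC2C below, the edges of node i occupy the range
// [GetRowStartIndex()[i], GetRowStartIndex()[i+1]) and can be visited as
//
//   for (unsigned int csr = row_start[i]; csr < row_start[i + 1]; csr++)
//   {
//       unsigned int j = column_index[csr]; //neighbour node
//       edge_values[csr].Add_grad_p(rhs[i], p[i], p[j]); //use edge data ij
//   }
//
// where row_start, column_index and edge_values are the vectors returned by
// GetRowStartIndex(), GetColumnIndex() and GetEdgeValues(), and rhs and p
// are assumed nodal vectors of type CalcVectorType and ValuesVectorType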
//class definition of matrices using CSR format
template<unsigned int TDim, class TSparseSpace>
class MatrixContainerC2C
{
public:
//name for the self defined structure
typedef EdgesStructureTypeC2C<TDim> CSR_Tuple;
typedef vector<CSR_Tuple> EdgesVectorType;
//name for row start and column index vectors
typedef vector<unsigned int> IndicesVectorType;
//names for separately stored node based values
typedef vector<double> ValuesVectorType;
// typedef std::vector< array_1d<double,TDim> > CalcVectorType;
typedef vector< array_1d<double, TDim> > CalcVectorType;
//constructor and destructor
MatrixContainerC2C()
{
};
~MatrixContainerC2C()
{
};
//functions to return private values
inline unsigned int GetNumberEdges()
{
return mNumberEdges;
}
inline EdgesVectorType& GetEdgeValues()
{
return mNonzeroEdgeValues;
}
inline IndicesVectorType& GetColumnIndex()
{
return mColumnIndex;
}
inline IndicesVectorType& GetRowStartIndex()
{
return mRowStartIndex;
}
inline ValuesVectorType& GetLumpedMass()
{
return mLumpedMassMatrix;
}
inline ValuesVectorType& GetInvertedMass()
{
return mInvertedMassMatrix;
}
inline CalcVectorType& GetDiagGradient()
{
return mDiagGradientMatrix;
}
inline ValuesVectorType& GetHmin()
{
return mHmin;
}
//********************************************************
//function to size and initialize the vector of CSR tuples
void ConstructCSRVector(ModelPart& model_part)
{
KRATOS_TRY
//SIZE OF CSR VECTOR
//defining the number of nodes and edges
int n_nodes = model_part.Nodes().size();
//remark: no colouring algorithm is used here (symmetry is neglected),
//i.e. edge ij is treated as distinct from edge ji
mNumberEdges = 0;
//counter to assign and get global nodal index
int i_node = 0;
//counting the edges connecting the nodes
for (typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin(); node_it != model_part.NodesEnd(); node_it++)
{
//counting neighbours of each node
mNumberEdges += (node_it->GetValue(NEIGHBOUR_NODES)).size();
//DIAGONAL TERMS
//mNumberEdges++;
//assigning global index to each node
node_it->FastGetSolutionStepValue(AUX_INDEX) = static_cast<double> (i_node++);
}
//error message in case number of nodes does not coincide with number of indices
if (i_node != n_nodes)
KRATOS_WATCH("ERROR - Highest nodal index doesn't coincide with number of nodes!");
//allocating memory for block of CSR data - setting to zero for first-touch OpenMP allocation
mNonzeroEdgeValues.resize(mNumberEdges); //SetToZero(mNonzeroEdgeValues);
mColumnIndex.resize(mNumberEdges); //SetToZero(mColumnIndex);
mRowStartIndex.resize(n_nodes + 1); //SetToZero(mRowStartIndex);
mLumpedMassMatrix.resize(n_nodes);
SetToZero(mLumpedMassMatrix);
mInvertedMassMatrix.resize(n_nodes);
SetToZero(mInvertedMassMatrix);
mDiagGradientMatrix.resize(n_nodes);
SetToZero(mDiagGradientMatrix);
mHmin.resize(n_nodes);
SetToZero(mHmin);
//INITIALIZING OF THE CSR VECTOR
//temporary variable as the row start index of a node depends on the number of neighbours of the previous one
unsigned int row_start_temp = 0;
int number_of_threads = OpenMPUtils::GetNumThreads();
std::vector<int> row_partition(number_of_threads);
OpenMPUtils::DivideInPartitions(model_part.Nodes().size(), number_of_threads, row_partition);
for (int k = 0; k < number_of_threads; k++)
{
#pragma omp parallel
if (OpenMPUtils::ThisThread() == k)
{
for (unsigned int aux_i = static_cast<unsigned int> (row_partition[k]); aux_i < static_cast<unsigned int> (row_partition[k + 1]); aux_i++)
{
typename ModelPart::NodesContainerType::iterator node_it = model_part.NodesBegin() + aux_i;
//main loop over all nodes
// for (typename ModelPart::NodesContainerType::iterator node_it=model_part.NodesBegin(); node_it!=model_part.NodesEnd(); node_it++)
// {
//getting the global index of the node
i_node = static_cast<unsigned int> (node_it->FastGetSolutionStepValue(AUX_INDEX));
//determining its neighbours
WeakPointerVector< Node < 3 > >& neighb_nodes = node_it->GetValue(NEIGHBOUR_NODES);
//number of neighbours of node i determines row start index for the following node
unsigned int n_neighbours = neighb_nodes.size();
//DIAGONAL TERMS
//n_neighbours++;
//reserving memory for work array
std::vector<unsigned int> work_array;
work_array.reserve(n_neighbours);
//DIAGONAL TERMS
//work_array.push_back(i_node);
//nested loop over the neighbouring nodes
for (WeakPointerVector< Node < 3 > >::iterator neighb_it = neighb_nodes.begin(); neighb_it != neighb_nodes.end(); neighb_it++)
{
//getting global index of the neighbouring node
work_array.push_back(static_cast<unsigned int> (neighb_it->FastGetSolutionStepValue(AUX_INDEX)));
}
//reordering neighbours following their global indices
std::sort(work_array.begin(), work_array.end());
//setting current row start index
mRowStartIndex[i_node] = row_start_temp;
//nested loop over the now-sorted neighbours
for (unsigned int counter = 0; counter < n_neighbours; counter++)
{
//getting global index of the neighbouring node
unsigned int j_neighbour = work_array[counter];
//calculating CSR index
unsigned int csr_index = mRowStartIndex[i_node] + counter;
//saving column index j of the original matrix
mColumnIndex[csr_index] = j_neighbour;
//initializing the CSR vector entries with zero
mNonzeroEdgeValues[csr_index].Mass = 0.0;
//mNonzeroEdgeValues[csr_index].Laplacian = 0.0;
noalias(mNonzeroEdgeValues[csr_index].LaplacianIJ) = ZeroMatrix(TDim, TDim);
noalias(mNonzeroEdgeValues[csr_index].Ni_DNj) = ZeroVector(TDim);
//TRANSPOSED GRADIENT
noalias(mNonzeroEdgeValues[csr_index].DNi_Nj) = ZeroVector(TDim);
}
//preparing row start index for next node
row_start_temp += n_neighbours;
}
}
}
//adding last entry (needed as the termination bound for loops over the CSR rows)
mRowStartIndex[n_nodes] = mNumberEdges;
//INITIALIZING NODE BASED VALUES
//lumped mass matrix (elements Mi)
/* #pragma omp parallel for
for (int i_node=0; i_node<n_nodes; i_node++)
mLumpedMassMatrix[i_node] = 0.0;*/
#pragma omp parallel for
//set the heights to a huge number
for (int i_node = 0; i_node < n_nodes; i_node++)
mHmin[i_node] = 1e10;
//diagonal of gradient matrix (elements Gii)
// #pragma omp parallel for
// for (int i_node=0; i_node<n_nodes; i_node++)
// noalias(mDiagGradientMatrix[i_node]) = ZeroVector(TDim);
KRATOS_CATCH("")
}
//*********************************
//function to precalculate CSR data
void BuildCSRData(ModelPart& model_part)
{
KRATOS_TRY
//PRECALCULATING CSR DATA
//defining temporary local variables for elementwise addition
//shape functions
array_1d<double, TDim + 1 > N;
//shape function derivatives
boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim> dN_dx;
//volume
double volume;
//weighting factor
double weighting_factor = 1.0 / static_cast<double> (TDim + 1);
//elemental matrices
boost::numeric::ublas::bounded_matrix <double, TDim + 1, TDim + 1 > mass_consistent;
//boost::numeric::ublas::bounded_matrix <double, TDim+1,TDim+1> laplacian;
array_1d<double, TDim + 1 > mass_lumped;
//global indices of elemental nodes
array_1d<unsigned int, TDim + 1 > nodal_indices;
array_1d<double, TDim + 1 > heights;
//loop over all elements
for (typename ModelPart::ElementsContainerType::iterator elem_it = model_part.ElementsBegin(); elem_it != model_part.ElementsEnd(); elem_it++)
{
//LOCAL ELEMENTWISE CALCULATIONS
//getting geometry data of the element
GeometryUtils::CalculateGeometryData(elem_it->GetGeometry(), dN_dx, N, volume);
//calculate the lengths of the heights of the element
for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
{
heights[ie_node] = dN_dx(ie_node, 0) * dN_dx(ie_node, 0);
for (unsigned int comp = 1; comp < TDim; comp++)
{
heights[ie_node] += dN_dx(ie_node, comp) * dN_dx(ie_node, comp);
}
heights[ie_node] = 1.0 / sqrt(heights[ie_node]);
// KRATOS_WATCH(heights);
}
//setting up elemental mass matrices
CalculateMassMatrix(mass_consistent, volume);
noalias(mass_lumped) = ZeroVector(TDim + 1);
for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
{
for (unsigned int je_node = 0; je_node <= TDim; je_node++)
{
//mass_consistent(ie_node,je_node) = N(ie_node) * N(je_node) * volume;
mass_lumped[ie_node] += mass_consistent(ie_node, je_node);
}
//mass_lumped[ie_node] = volume * N[ie_node];
}
/*OLD DATA STRUCTURE
//calculating elemental laplacian matrix
noalias(laplacian) = ZeroMatrix(TDim+1,TDim+1);
for (unsigned int ie_node=0; ie_node<=TDim; ie_node++)
for (unsigned int je_node=ie_node+1; je_node<=TDim; je_node++)
//componentwise multiplication
for (unsigned int component=0; component<TDim; component++)
{
//taking advantage of symmetry
double temp = dN_dx(ie_node,component) * dN_dx(je_node,component) * volume;
laplacian(ie_node,je_node) += temp;
laplacian(je_node,ie_node) += temp;
}
//multiply gradient with volume referring to each gauss point
dN_dx *= (volume / double(TDim+1));*/
//(corresponding to Ni * dOmega and Nj * dOmega, respectively)
double weighted_volume = volume * weighting_factor;
//ASSEMBLING GLOBAL DATA STRUCTURE
//loop over the nodes of the element to determine their global indices
for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
nodal_indices[ie_node] = static_cast<unsigned int> (elem_it->GetGeometry()[ie_node].FastGetSolutionStepValue(AUX_INDEX));
//assembling global "edge matrices" by adding local contributions
for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
{
//check the heights and update the stored value if a smaller one is found
if (mHmin[ nodal_indices[ie_node] ] > heights[ie_node])
mHmin[ nodal_indices[ie_node] ] = heights[ie_node];
for (unsigned int je_node = 0; je_node <= TDim; je_node++)
{
//remark: there is no edge linking node i with itself!
//DIAGONAL TERMS
if (ie_node != je_node)
{
//calculating CSR index from global index
unsigned int csr_index = GetCSRIndex(nodal_indices[ie_node], nodal_indices[je_node]);
//assigning precalculated element data to the referring edges
//contribution to edge mass
mNonzeroEdgeValues[csr_index].Mass += mass_consistent(ie_node, je_node);
//contribution to edge laplacian
/*OLD DATA STRUCTURE
mNonzeroEdgeValues[csr_index].Laplacian = laplacian(ie_node,je_node);*/
boost::numeric::ublas::bounded_matrix <double, TDim, TDim>& laplacian = mNonzeroEdgeValues[csr_index].LaplacianIJ;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
laplacian(l_comp, k_comp) += dN_dx(ie_node, l_comp) * dN_dx(je_node, k_comp) * volume;
//contribution to edge gradient
array_1d<double, TDim>& gradient = mNonzeroEdgeValues[csr_index].Ni_DNj;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
//gradient[l_comp] += dN_dx(je_node,l_comp);
gradient[l_comp] += dN_dx(je_node, l_comp) * weighted_volume;
//TRANSPOSED GRADIENT
//contribution to transposed edge gradient
array_1d<double, TDim>& transp_gradient = mNonzeroEdgeValues[csr_index].DNi_Nj;
for (unsigned int l_comp = 0; l_comp < TDim; l_comp++)
//transp_gradient[l_comp] += dN_dx(ie_node,l_comp);
transp_gradient[l_comp] += dN_dx(ie_node, l_comp) * weighted_volume;
}
}
}
//assembling node based vectors
for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
//diagonal of the global lumped mass matrix
mLumpedMassMatrix[nodal_indices[ie_node]] += mass_lumped[ie_node];
for (unsigned int ie_node = 0; ie_node <= TDim; ie_node++)
{
//diagonal of the global gradient matrix
array_1d<double, TDim>& gradient = mDiagGradientMatrix[nodal_indices[ie_node]];
for (unsigned int component = 0; component < TDim; component++)
//gradient[component] += dN_dx(ie_node,component);
gradient[component] += dN_dx(ie_node, component) * weighted_volume;
}
}
//copy mass matrix to inverted mass matrix
for (unsigned int inode = 0; inode < mLumpedMassMatrix.size(); inode++)
{
mInvertedMassMatrix[inode] = mLumpedMassMatrix[inode];
}
//perform MPI synchronization between the domains
//calculating the inverted mass matrix (this requires synchronization for MPI parallelism)
for (unsigned int inode = 0; inode < mInvertedMassMatrix.size(); inode++)
{
mInvertedMassMatrix[inode] = 1.0 / mInvertedMassMatrix[inode];
}
KRATOS_CATCH("")
}
//******************************************
//function to calculate CSR index of edge ij
unsigned int GetCSRIndex(unsigned int NodeI, unsigned int NeighbourJ)
{
KRATOS_TRY
//index indicating data position of edge ij
unsigned int csr_index;
//searching for coincidence of stored column index and neighbour index j
for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
if (mColumnIndex[csr_index] == NeighbourJ)
break;
//returning CSR index of edge ij
return csr_index;
KRATOS_CATCH("")
}
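//remark: the linear scan above (also used in GetTuplePointer below) is
//acceptable since a CSR row only spans the neighbours of a single node,
//which is a short list for typical finite element meshes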
//***********************************************
//function to get pointer to CSR tuple of edge ij
CSR_Tuple* GetTuplePointer(unsigned int NodeI, unsigned int NeighbourJ)
{
KRATOS_TRY
//index indicating data position of edge ij
unsigned int csr_index;
//searching for coincidence of stored column index and neighbour index j
for (csr_index = mRowStartIndex[NodeI]; csr_index != mRowStartIndex[NodeI + 1]; csr_index++)
if (mColumnIndex[csr_index] == NeighbourJ)
break;
//returning pointer to CSR tuple of edge ij
return &mNonzeroEdgeValues[csr_index];
KRATOS_CATCH("")
}
//*******************************
//function to free dynamic memory
void Clear()
{
KRATOS_TRY
mNonzeroEdgeValues.clear();
mColumnIndex.clear();
mRowStartIndex.clear();
mInvertedMassMatrix.clear();
mLumpedMassMatrix.clear();
mDiagGradientMatrix.clear();
mHmin.clear();
KRATOS_CATCH("")
}
//****************************
//functions to access database
//(note that this is already designed for parallel execution;
// for a single processor this could be done in a faster way)
void FillCoordinatesFromDatabase(CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
//loop over all nodes
int n_nodes = rNodes.size();
ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
#pragma omp parallel for firstprivate(n_nodes, it_begin)
for (int i = 0; i < n_nodes; i++)
{
ModelPart::NodesContainerType::iterator node_it = it_begin + i;
//get the global index of node i
// // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
unsigned int i_node = i;
//save value in the destination vector
for (unsigned int component = 0; component < TDim; component++)
(rDestination[i_node])[component] = (*node_it)[component];
}
KRATOS_CATCH("");
}
//****************************
//functions to access database
//(note that this is already designed for parallel execution;
// for a single processor this could be done in a faster way)
void FillVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
//loop over all nodes
int n_nodes = rNodes.size();
ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);
#pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
for (int i = 0; i < n_nodes; i++)
{
ModelPart::NodesContainerType::iterator node_it = it_begin + i;
//get the global index of node i
// // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
unsigned int i_node = i;
//get the requested value in vector form
array_1d<double, 3 > & vector = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);
//save value in the destination vector
for (unsigned int component = 0; component < TDim; component++)
(rDestination[i_node])[component] = vector[component];
}
KRATOS_CATCH("");
}
void FillOldVectorFromDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
//loop over all nodes
int n_nodes = rNodes.size();
ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);
#pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
for (int i = 0; i < n_nodes; i++)
{
ModelPart::NodesContainerType::iterator node_it = it_begin + i;
//get the global index of node i
// // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
unsigned int i_node = i;
//get the requested value in vector form
array_1d<double, 3 > & vector = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);
//save value in the destination vector
for (unsigned int component = 0; component < TDim; component++)
(rDestination[i_node])[component] = vector[component];
}
KRATOS_CATCH("");
}
void FillScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
//loop over all nodes
int n_nodes = rNodes.size();
ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);
#pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
for (int i = 0; i < n_nodes; i++)
{
ModelPart::NodesContainerType::iterator node_it = it_begin + i;
//get the global index of node i
// // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
unsigned int i_node = i;
//get the requested scalar value
double& scalar = node_it-> FastGetCurrentSolutionStepValue(rVariable, var_pos);
//save value in the destination vector
rDestination[i_node] = scalar;
}
KRATOS_CATCH("");
}
void FillOldScalarFromDatabase(Variable<double>& rVariable, ValuesVectorType& rDestination, ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
int n_nodes = rNodes.size();
ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);
#pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
for (int i = 0; i < n_nodes; i++)
{
ModelPart::NodesContainerType::iterator node_it = it_begin + i;
//get the global index of node i
// // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
unsigned int i_node = i;
//get the requested scalar value
double& scalar = node_it->FastGetSolutionStepValue(rVariable, 1, var_pos);
//save value in the destination vector
rDestination[i_node] = scalar;
}
KRATOS_CATCH("");
}
void WriteVectorToDatabase(Variable<array_1d<double, 3 > >& rVariable, CalcVectorType& rOrigin, ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
//loop over all nodes
int n_nodes = rNodes.size();
ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);
#pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
for (int i = 0; i < n_nodes; i++)
{
ModelPart::NodesContainerType::iterator node_it = it_begin + i;
//get the global index of node i
// // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
unsigned int i_node = i;
//get reference of destination
array_1d<double, 3 > & vector = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);
//save vector in database
for (unsigned int component = 0; component < TDim; component++)
vector[component] = (rOrigin[i_node])[component];
}
KRATOS_CATCH("");
}
void WriteScalarToDatabase(Variable<double>& rVariable, ValuesVectorType& rOrigin, ModelPart::NodesContainerType& rNodes)
{
KRATOS_TRY
//loop over all nodes
int n_nodes = rNodes.size();
ModelPart::NodesContainerType::iterator it_begin = rNodes.begin();
unsigned int var_pos = it_begin->pGetVariablesList()->Index(rVariable);
#pragma omp parallel for firstprivate(n_nodes, it_begin,var_pos)
for (int i = 0; i < n_nodes; i++)
{
ModelPart::NodesContainerType::iterator node_it = it_begin + i;
//get the global index of node i
// // unsigned int i_node = static_cast<unsigned int>(node_it->FastGetSolutionStepValue(AUX_INDEX));
unsigned int i_node = i;
//get reference of destination
double& scalar = node_it->FastGetCurrentSolutionStepValue(rVariable, var_pos);
//save scalar in database
scalar = rOrigin[i_node];
}
KRATOS_CATCH("");
}
//*********************************************************************
// destination = origin1 + value * Minv * origin, applied node-wise with Minv the diagonal of the inverted mass matrix
void Add_Minv_value(
CalcVectorType& destination,
const CalcVectorType& origin1,
const double value,
const ValuesVectorType& Minv_vec,
const CalcVectorType& origin
)
{
KRATOS_TRY
int loop_size = destination.size();
#pragma omp parallel for
for (int i_node = 0; i_node < loop_size; i_node++)
{
array_1d<double, TDim>& dest = destination[i_node];
const double m_inv = Minv_vec[i_node];
const array_1d<double, TDim>& origin_vec1 = origin1[i_node];
const array_1d<double, TDim>& origin_value = origin[i_node];
double temp = value * m_inv;
for (unsigned int comp = 0; comp < TDim; comp++)
dest[comp] = origin_vec1[comp] + temp * origin_value[comp];
}
KRATOS_CATCH("")
}
void Add_Minv_value(
ValuesVectorType& destination,
const ValuesVectorType& origin1,
const double value,
const ValuesVectorType& Minv_vec,
const ValuesVectorType& origin
)
{
KRATOS_TRY
int loop_size = destination.size();
#pragma omp parallel for
for (int i_node = 0; i_node < loop_size; i_node++)
{
double& dest = destination[i_node];
const double m_inv = Minv_vec[i_node];
const double& origin_vec1 = origin1[i_node];
const double& origin_value = origin[i_node];
double temp = value * m_inv;
dest = origin_vec1 + temp * origin_value;
}
KRATOS_CATCH("")
}
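// Example (illustrative names, not part of this class): a forward-Euler
// update of the form
//   v_new = v_old + dt * M^{-1} * rhs
// maps onto this helper as
//   Add_Minv_value(v_new, v_old, dt, mInvertedMassMatrix, rhs);
// where mInvertedMassMatrix is the member stored further below.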
//**********************************************************************
void AllocateAndSetToZero(CalcVectorType& data_vector, int size)
{
data_vector.resize(size);
int loop_size = size;
#pragma omp parallel for
for (int i_node = 0; i_node < loop_size; i_node++)
{
array_1d<double, TDim>& aaa = data_vector[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
aaa[comp] = 0.0;
}
}
void AllocateAndSetToZero(ValuesVectorType& data_vector, int size)
{
data_vector.resize(size);
int loop_size = size;
#pragma omp parallel for
for (int i_node = 0; i_node < loop_size; i_node++)
{
data_vector[i_node] = 0.0;
}
}
//**********************************************************************
void SetToZero(CalcVectorType& data_vector)
{
int loop_size = data_vector.size();
#pragma omp parallel for
for (int i_node = 0; i_node < loop_size; i_node++)
{
array_1d<double, TDim>& aaa = data_vector[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
aaa[comp] = 0.0;
}
}
void SetToZero(ValuesVectorType& data_vector)
{
int loop_size = data_vector.size();
#pragma omp parallel for
for (int i_node = 0; i_node < loop_size; i_node++)
{
data_vector[i_node] = 0.0;
}
}
//**********************************************************************
void AssignVectorToVector(const CalcVectorType& origin,
CalcVectorType& destination
)
{
int loop_size = origin.size();
#pragma omp parallel for
for (int i_node = 0; i_node < loop_size; i_node++)
{
const array_1d<double, TDim>& orig = origin[i_node];
array_1d<double, TDim>& dest = destination[i_node];
for (unsigned int comp = 0; comp < TDim; comp++)
dest[comp] = orig[comp];
}
}
void AssignVectorToVector(const ValuesVectorType& origin,
ValuesVectorType& destination
)
{
int loop_size = origin.size();
#pragma omp parallel for
for (int i_node = 0; i_node < loop_size; i_node++)
{
destination[i_node] = origin[i_node];
}
}
private:
//number of edges
unsigned int mNumberEdges;
//CSR data vector for storage of the G, L and consistent M components of edge ij
EdgesVectorType mNonzeroEdgeValues;
//vector to store column indices of nonzero matrix elements for each row
IndicesVectorType mColumnIndex;
//index vector to access the start of matrix row i in the column vector
IndicesVectorType mRowStartIndex;
//inverse of the mass matrix ... for parallel calculation each subdomain should contain this correctly calculated (including contributions of the neighbours)
ValuesVectorType mInvertedMassMatrix;
//minimum height around one node
ValuesVectorType mHmin;
//lumped mass matrix (separately stored due to lack of diagonal elements of the consistent mass matrix)
ValuesVectorType mLumpedMassMatrix;
//diagonal of the gradient matrix (separately stored due to special calculations)
CalcVectorType mDiagGradientMatrix;
//*******************************************
//functions to set up elemental mass matrices
void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 3, 3 > & mass_consistent, double volume)
{
for (unsigned int i_node = 0; i_node <= TDim; i_node++)
{
//diagonal terms
mass_consistent(i_node, i_node) = 0.16666666666666666667 * volume; //1/6
//non-diagonal terms
double temp = 0.08333333333333333333 * volume; // 1/12
for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++)
{
//taking advantage of symmetry
mass_consistent(i_node, j_neighbour) = temp;
mass_consistent(j_neighbour, i_node) = temp;
}
}
}
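// For a triangle of area V the loop above assembles the standard
// consistent mass matrix
//          [ 2 1 1 ]
//   V/12 * [ 1 2 1 ]
//          [ 1 1 2 ]
// whose diagonal entries are V/6 and off-diagonal entries V/12.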
void CalculateMassMatrix(boost::numeric::ublas::bounded_matrix<double, 4, 4 > & mass_consistent, double volume)
{
for (unsigned int i_node = 0; i_node <= TDim; i_node++)
{
//diagonal terms
mass_consistent(i_node, i_node) = 0.1 * volume; // 1/10
//non-diagonal terms
double temp = 0.05 * volume; // 1/20
for (unsigned int j_neighbour = i_node + 1; j_neighbour <= TDim; j_neighbour++)
{
//taking advantage of symmetry
mass_consistent(i_node, j_neighbour) = temp;
mass_consistent(j_neighbour, i_node) = temp;
}
}
}
};
} //namespace Kratos
#endif //KRATOS_EDGE_DATA_C2C_H_INCLUDED defined
|
julia.c | /*
* julia.c
*
* @author: phdenzel
*
* The Julia set generator
*
*/
#include "julia.h"
#include "complex.h"
#include "graphics.h"
#include <SDL2/SDL.h>
#include <stdlib.h> /* malloc, free */
#include <string.h> /* memset */
/* Global variables */
const int NMAX = 255;
int WIDTH = 640;
int HEIGHT = 480;
//double ORIGIN[2] = {-0.75, 0.};
double ORIGIN[2] = {0., 0.};
double PROJ[2] = {4, 4*0.75};
// the first index is being used for mouse events
complex_t cconst[9] = { {0., 0.},
{-0.8, 0.156},
{-0.7269, 0.1889},
{-0.61803398875, 0.},
{-0.4, 0.6},
{0.285, 0.01},
{-0.70176, 0.3842},
{-0.835, -0.2321},
{0, -0.8} };
/* Functions */
complex_t iterator(complex_t* z, complex_t* c) {
complex_t z2 = complex_squarecpy(z);
complex_t fz = complex_addcpy(&z2, c);
return fz;
}
int mandelbrot(complex_t* z, complex_t* c, double x, double y) {
c->real = linearMap(x, 0, WIDTH, ORIGIN[0]-PROJ[0]/2, ORIGIN[0]+PROJ[0]/2);
c->imag = linearMap(y, 0, HEIGHT, ORIGIN[1]-PROJ[1]/2, ORIGIN[1]+PROJ[1]/2);
int n = 0;
while (n < NMAX) {
if (complex_abs2(z) > 4) {
break;
}
*z = iterator(z, c);
n++;
}
*z = *c;
return n;
}
int julia(complex_t* z, complex_t* c, double x, double y) {
z->real = linearMap(x, 0, WIDTH, ORIGIN[0]-PROJ[0]/2, ORIGIN[0]+PROJ[0]/2);
z->imag = linearMap(y, 0, HEIGHT, ORIGIN[1]-PROJ[1]/2, ORIGIN[1]+PROJ[1]/2);
int n = 0;
while (n < NMAX) {
if (complex_abs2(z) > 4) {
break;
}
*z = iterator(z, c);
n++;
}
return n;
}
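/* For reference, a minimal sketch of the affine remap used above. The real
 * linearMap is declared in graphics.h; this only documents the assumed
 * behavior, not the project's implementation:
 *
 *   double linearMap(double v, double lo, double hi, double tlo, double thi) {
 *       return tlo + (v - lo) * (thi - tlo) / (hi - lo);
 *   }
 *
 * e.g. x = 320 with WIDTH = 640 maps to the midpoint ORIGIN[0] of the
 * real-axis window [ORIGIN[0]-PROJ[0]/2, ORIGIN[0]+PROJ[0]/2].
 */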
/* Main */
int main(void) {
/* Init graphics */
SDL_Window* window;
SDL_Renderer* renderer;
SDL_Texture* texture;
int is_running = 1;
SDL_Init(SDL_INIT_VIDEO);
window = SDL_CreateWindow("Mandelbrot",
SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
WIDTH, HEIGHT,
0);
renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED);
texture = SDL_CreateTexture(renderer,
SDL_PIXELFORMAT_ARGB8888,
SDL_TEXTUREACCESS_STATIC,
WIDTH, HEIGHT);
Uint32* pixels = malloc(WIDTH*HEIGHT*sizeof(Uint32));
memset(pixels, 0, WIDTH*HEIGHT*sizeof(Uint32));
/* Escape-time iteration state */
complex_t z = {0.0, 0.0};
complex_t c = cconst[1];
int n = 0;
int x = 0;
int y = 0;
/* Main loop */
while (is_running) {
is_running = processEvents(window, is_running, 1); // w/ mouse tracking
#pragma omp simd
for (int i = 0; i<WIDTH*HEIGHT; i++) {
c = cconst[0];
// linear map pixel coordinates
x = i%WIDTH; // pixel coord in x
y = i/WIDTH; // pixel coord in y
/* n = mandelbrot(&z, &c, x, y); */
n = julia(&z, &c, x, y);
/* pixels[i] = getPixelColor(n, n, n, 255); */
/* pixels[i] = singulettMagmaCM(n%256); */
pixels[i] = singulettParulaCM(n%256);
}
/* Project to Renderer and then to Window */
SDL_UpdateTexture(texture, NULL, pixels, WIDTH*sizeof(Uint32));
SDL_RenderClear(renderer);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
SDL_Delay(100);
}
/* Clean-up: destroy in reverse order of creation */
free(pixels);
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(window);
SDL_Quit();
return 0;
}
|
star.np.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
#include <math.h> /* ceil, used by the ceild() macro below */
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define myabs(x,y) (((x) > (y))? ((x)-(y)) : ((y)-(x)))
#if !defined(point)
#define point 7
#endif
#if point == 27
#define kernel(A,t) A[(t+1)%2][x][y][z] = 0.54 * (A[(t)%2][x][y][z]) + \
0.03 * (A[(t)%2][x][y][z-1] + A[(t)%2][x][y-1][z] + \
A[(t)%2][x][y+1][z] + A[(t)%2][x][y][z+1] + \
A[(t)%2][x-1][y][z] + A[(t)%2][x+1][y][z])+ \
0.01 * (A[(t)%2][x-1][y][z-1] + A[(t)%2][x-1][y-1][z] + \
A[(t)%2][x-1][y+1][z] + A[(t)%2][x-1][y][z+1] + \
A[(t)%2][x][y-1][z-1] + A[(t)%2][x][y+1][z-1] + \
A[(t)%2][x][y-1][z+1] + A[(t)%2][x][y+1][z+1] + \
A[(t)%2][x+1][y][z-1] + A[(t)%2][x+1][y-1][z] + \
A[(t)%2][x+1][y+1][z] + A[(t)%2][x+1][y][z+1])+ \
0.02 * (A[(t)%2][x-1][y-1][z-1] + A[(t)%2][x-1][y+1][z-1] + \
A[(t)%2][x-1][y-1][z+1] + A[(t)%2][x-1][y+1][z+1] + \
A[(t)%2][x+1][y-1][z-1] + A[(t)%2][x+1][y+1][z-1] + \
A[(t)%2][x+1][y-1][z+1] + A[(t)%2][x+1][y+1][z+1]);
#define XSLOPE 1
#define YSLOPE 1
#define ZSLOPE 1
#elif point == 7
#define kernel(A,t) A[(t+1)%2][x][y][z] = 0.64 * (A[(t)%2][x][y][z]) + \
0.06 * (A[(t)%2][x - 1][y][z] + A[(t)%2][x][y - 1][z] + \
A[(t)%2][x][y][z - 1] + A[(t)%2][x + 1][y][z] + \
A[(t)%2][x][y + 1][z] + A[(t)%2][x][y][z + 1]);
#define XSLOPE 1
#define YSLOPE 1
#define ZSLOPE 1
#endif
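/* Sanity check on the stencil weights: both variants are convex
 * combinations, so repeated application of the kernel is stable.
 *   27-point: 0.54 + 6*0.03 + 12*0.01 + 8*0.02 = 1.0
 *    7-point: 0.64 + 6*0.06                    = 1.0
 */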
#ifdef CHECK
#define TOLERANCE 0
#endif
#if defined(NX) && defined(NY) && defined(NZ)
double A[2][NX+2*XSLOPE][NY+2*YSLOPE][NZ+2*ZSLOPE];
#endif
int main(int argc, char * argv[])
{
struct timeval start, end;
long int t, i, j, k;
if (argc < 7) {
fprintf(stderr, "usage: %s NX NY NZ T Bx tb\n", argv[0]);
return 1;
}
int NX = atoi(argv[1]);
int NY = atoi(argv[2]);
int NZ = atoi(argv[3]);
int T = atoi(argv[4]);
int Bx = atoi(argv[5]);
int tb = atoi(argv[6]);
if(Bx<(2*XSLOPE+1) || Bx>NX || tb>((Bx-1)/2)/XSLOPE)
{
/* tile width incompatible with the time-block/slope constraints */
return 0;
}
#if !defined(NX)
double (*A)[NX+2*XSLOPE][NY+2*YSLOPE][NZ+2*ZSLOPE] = (double (*)[NX+2*XSLOPE][NY+2*YSLOPE][NZ+2*ZSLOPE])malloc(sizeof(double)*(NX+2*XSLOPE)*(NY+2*YSLOPE)*(NZ+2*ZSLOPE)*2);
if(NULL == A) return 0;
#endif
#ifdef CHECK
double (*B)[NX+2*XSLOPE][NY+2*YSLOPE][NZ+2*ZSLOPE] = (double (*)[NX+2*XSLOPE][NY+2*YSLOPE][NZ+2*ZSLOPE])malloc(sizeof(double)*(NX+2*XSLOPE)*(NY+2*YSLOPE)*(NZ+2*ZSLOPE)*2);
if(NULL == B) return 0;
#endif
srand(100);
for (i = 0; i < NX+2*XSLOPE; i++) {
for (j = 0; j < NY+2*YSLOPE; j++) {
for (k = 0; k < NZ+2*ZSLOPE; k++) {
A[0][i][j][k] = (double) (1.0 * (rand() % 1024));
A[1][i][j][k] = 0;
#ifdef CHECK
B[0][i][j][k] = A[0][i][j][k];
B[1][i][j][k] = 0;
#endif
}
}
}
int bx = Bx-2*(tb*XSLOPE);
int ix = Bx+bx; // combined block stride in the x direction
// odd time levels span two spatial parities; B0/B2 and B11/B12 have mirrored xleft and the same ybottom
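/* Worked example of the tiling parameters (illustrative numbers only):
 * Bx = 8, tb = 2, XSLOPE = 1  =>  bx = 8 - 2*2*1 = 4 and ix = Bx + bx = 12,
 * i.e. a large trapezoid of base 8 and a small one of base 4 alternate
 * every 12 grid points along x. */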
/* int xleft0[2] = {XSLOPE, XSLOPE-(ix/2)};
int ybottom0[2] = {XSLOPE-Bx/2, XSLOPE+(bx/2+1)};
int xleft11[2] = {XSLOPE+Bx/2, XSLOPE-(bx/2+1)};
int ybottom11[2] = {XSLOPE, XSLOPE+(ix/2)};
int xleft12[2] = {xleft11[1], xleft11[0]};
int ybottom12[2] = {ybottom11[0], ybottom11[1]};
int xleft2[2] = {xleft0[1], xleft0[0]};
int ybottom2[2] = {ybottom0[0], ybottom0[1]};
*/
int xleft0[2] = {XSLOPE+(bx/2+1), XSLOPE-(Bx/2)};
int ybottom0[2] = {XSLOPE-(Bx/2), XSLOPE+(bx/2+1)};
int xleft11[2] = {XSLOPE+(ix/2), XSLOPE};
int ybottom11[2] = {XSLOPE, XSLOPE+(ix/2)};
int xleft12[2] = {xleft11[1], xleft11[0]};
int ybottom12[2] = {ybottom11[0], ybottom11[1]};
int xleft2[2] = {xleft0[1], xleft0[0]};
int ybottom2[2] = {ybottom0[0], ybottom0[1]};
// on even time levels the blocks B0, B11, B12, B2 sit where B2, B12, B11, B0 sit on odd levels
int xnb0[2] = {(NX+XSLOPE-1-xleft0[0])/ix+1, (NX+XSLOPE-1-xleft0[1])/ix+1};// number of B0 blocks along x, for the two spatial parities
int ynb0[2] = {(NY+XSLOPE-1-ybottom0[0])/ix+1, (NY+XSLOPE-1-ybottom0[1])/ix+1};// number of B0 blocks along y
int xnb11[2] = {(NX+XSLOPE-1-xleft11[0])/ix+1, (NX+XSLOPE-1-xleft11[1])/ix+1};// number of B11 blocks along x
int ynb11[2] = {(NY+XSLOPE-1-ybottom11[0])/ix+1, (NY+XSLOPE-1-ybottom11[1])/ix+1};// number of B11 blocks along y
int xnb12[2] = {xnb11[1], xnb11[0]};
int ynb12[2] = {ynb11[0], ynb11[1]};
int xnb2[2] = {xnb0[1], xnb0[0]};
int ynb2[2] = {ynb0[0], ynb0[1]};
int nb1[2] = {xnb11[0]*ynb11[0] + xnb11[1]*ynb11[1], xnb12[0]*ynb12[0] + xnb12[1]*ynb12[1]};// total number of B11 and B12 blocks on odd time levels
int nb02[2] = {xnb0[0]*ynb0[0] + xnb0[1]*ynb0[1], xnb2[0]*ynb2[0] + xnb2[1]*ynb2[1]};// total number of B0/B2 blocks on odd/even time levels
int xnb1[2] = {xnb11[0], xnb12[0]};
int xnb02[2] = {xnb0[0], xnb2[0]};
int level = 0;
int tt,n;
int x, y, z, xr;
int ymin, ymax;
int xmin,xmax;
int dy;
gettimeofday(&start,0);
for(tt=-tb; tt< T; tt+=tb)
{
#pragma omp parallel for schedule(dynamic) private(xmin,xmax,ymin,ymax,t,x,y,z)
for(n=0; n < nb02[level]; n++)
{
for(t= max(tt,0); t <min( tt + 2*tb, T); t++)
{
if(n<xnb0[level]*ynb0[0])
{
xmin = xleft0[level] + (Bx-bx)/2 + (n%xnb0[level])*ix - tb*XSLOPE + myabs(tt+tb,t+1)*XSLOPE;
xmax = xleft0[level] + (Bx-bx)/2 + (n%xnb0[level])*ix + bx + tb*XSLOPE - myabs(tt+tb,t+1)*XSLOPE;
ymin = ybottom0[0] + (Bx-bx)/2 + (n/xnb0[level])*ix - tb*XSLOPE + myabs(tt+tb,t+1)*XSLOPE;
ymax = ybottom0[0] + (Bx-bx)/2 + (n/xnb0[level])*ix + bx + tb*XSLOPE - myabs(tt+tb,t+1)*XSLOPE;
}
else
{
xmin = xleft0[1-level] + (Bx-bx)/2 + ((n-xnb0[level]*ynb0[0])%xnb0[1-level])*ix - tb*XSLOPE + myabs(tt+tb,t+1)*XSLOPE;
xmax = xleft0[1-level] + (Bx-bx)/2 + ((n-xnb0[level]*ynb0[0])%xnb0[1-level])*ix + bx + tb*XSLOPE - myabs(tt+tb,t+1)*XSLOPE;
ymin = ybottom0[1] + (Bx-bx)/2 + ((n-xnb0[level]*ynb0[0])/xnb0[1-level])*ix - tb*XSLOPE + myabs(tt+tb,t+1)*XSLOPE;
ymax = ybottom0[1] + (Bx-bx)/2 + ((n-xnb0[level]*ynb0[0])/xnb0[1-level])*ix + bx + tb*XSLOPE - myabs(tt+tb,t+1)*XSLOPE;
}
for(x=max(XSLOPE,xmin); x<min(NX+XSLOPE,xmax); x++)
{
for(y=max(ymin + myabs((xmin+xmax-1)/2,x), YSLOPE); y<min(ymax - myabs((xmin+xmax-1)/2,x), NY+YSLOPE); y++)
{
#pragma simd
for (z = ZSLOPE; z < NZ+ZSLOPE; z++)
{
kernel(A,t);
}
}
}
}
}
#pragma omp parallel for schedule(dynamic) private(xmin,xmax,ymin,ymax,t,x,y,z,dy,xr,i)
for(n=0; n <nb1[0] + nb1[1]; n++)
{
for(t= tt+tb ; t <min( tt + 2*tb + (bx>1), T+1); t++)
{
if(n<nb1[level]) //B11
{
dy = -1;
if(n<xnb11[level]*ynb11[0]) // B11 blocks on the odd spatial parity
{
xmin = xleft11[level] + (n%xnb11[level]) * ix + XSLOPE;
ymax = ybottom11[0] + (n/xnb11[level]) * ix + XSLOPE - 1;
}
else
{
xmin = xleft11[1-level] + ((n-xnb11[level]*ynb11[0])%xnb11[1-level]) * ix + XSLOPE;
ymax = ybottom11[1] + ((n-xnb11[level]*ynb11[0])/xnb11[1-level]) * ix + XSLOPE - 1;
}
}
else //B12
{
dy = 1;
if(n<nb1[level]+xnb12[level]*ynb11[0])
{
xmin = xleft12[level] + ((n-nb1[level])%xnb12[level]) * ix + XSLOPE;
ymax = ybottom11[0] + ((n-nb1[level])/xnb12[level]) * ix + ix/2 - XSLOPE + 1;
}
else
{
xmin = xleft12[1-level] + ((n-nb1[level]-xnb12[level]*ynb11[0])%xnb12[1-level]) * ix + XSLOPE;
ymax = ybottom11[1] + ((n-nb1[level]-xnb12[level]*ynb11[0])/xnb12[1-level]) * ix + ix/2 - XSLOPE + 1;
}
}
xmax = xmin + ix/2 - bx/2 - (t - (tt+tb)) - 2*XSLOPE + 1;
ymin = ymax + dy * (XSLOPE - 1 - ix/2 + bx/2 + XSLOPE + t - (tt+tb));
for(i=0; i<bx/2+1+t-(tt+tb); i++)
{
x = xmin + i + max(ymin-dy*i-(NY+YSLOPE-1), 0);
y = ymin - dy * i + dy * max(ymin-dy*i-(NY+YSLOPE-1), 0);
// xr = x + min(NX+XSLOPE-x,
// (dy==-1) ? ymin - ymax - max(ymin-dy*i-(NY+YSLOPE-1), 0)
// : ymax - ymin - max(ymax-dy*i-(NY+YSLOPE), 0));
//printf("%d\t%d\t%d\n",xr, max(xmax+i-(NX+XSLOPE),0), x + ((dy==-1) ? ymin - ymax - max(ymin-dy*i-(NY+YSLOPE-1), 0) - max(xmax+i-(NX+XSLOPE),0) : ymax - ymin - max(max(ymax-dy*i-(NY+YSLOPE), xmax+i-(NX+XSLOPE)), 0)));
xr = x + ((dy==-1) ? ymin - ymax - max(ymin-dy*i-(NY+YSLOPE-1), 0) - max(xmax+i-(NX+XSLOPE),0)
: ymax - ymin - max(max(ymax-dy*i-(NY+YSLOPE), xmax+i-(NX+XSLOPE)), 0));
// x = max( XSLOPE, xmin + i + max(((dy==-1) ? ymin-dy*i-(NY+YSLOPE-1) : 0),0));// x of B12 needs no shift
// y = min(NY+YSLOPE-1, ymin - dy * i + dy * max(XSLOPE-(xmin+i),0));
// ny = min(max(0, (dy==-1) ? min(ymin-ymax-max(XSLOPE-(xmin+i),0), NY+YSLOPE-1-(ymax-dy*i)) : min(ymax-dy*i, NY+YSLOPE)-(ymin-dy*i+max(XSLOPE-(xmin+i), 0))), NX+XSLOPE-x);
for(; x<xr; x+=XSLOPE, y+=dy*YSLOPE)
{
if(t>tt+tb)
{
#pragma simd
for (z = ZSLOPE; z < NZ+ZSLOPE; z++)
{
kernel(A,t-1);
}
}
if(t<min(T,tt+2*tb))
{
#pragma simd
for (z = ZSLOPE; z < NZ+ZSLOPE; z++)
{
kernel(A,t);
}
}
}
}
}
}
level = 1- level;
}
gettimeofday(&end,0);
printf("MStencil/s = %f\n", ((double)NX * NY * NZ * T) / (double)(end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) * 1.0e-6) / 1000000L);
#ifdef CHECK
for (t = 0; t < T; t++) {
for (x = XSLOPE; x < NX+XSLOPE; x++) {
for (y = YSLOPE; y < NY+YSLOPE; y++) {
for (z = ZSLOPE; z < NZ+ZSLOPE; z++) {
kernel(B,t);
}
}
}
}
for (i = XSLOPE; i < NX+XSLOPE; i++) {
for (j = YSLOPE; j < NY+YSLOPE; j++) {
for (k = ZSLOPE; k < NZ+ZSLOPE; k++) {
if(myabs(A[T%2][i][j][k], B[T%2][i][j][k]) > TOLERANCE)
printf("Naive[%d][%d][%d] = %f, Check() = %f: FAILED!\n", i, j, k, B[T%2][i][j][k], A[T%2][i][j][k]);
}
}
}
#endif
}
|
common.c | #define PY_SSIZE_T_CLEAN
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define NO_IMPORT_ARRAY
#define PY_ARRAY_UNIQUE_SYMBOL MICPY_ARRAY_API
#include <numpy/arrayobject.h>
#include <numpy/npy_3kcompat.h>
#define _MICARRAYMODULE
#include "common.h"
#include "arrayobject.h"
#include "npy_config.h"
NPY_NO_EXPORT int
_zerofill(PyMicArrayObject *ret)
{
if (PyDataType_REFCHK(PyMicArray_DESCR(ret))) {
PyErr_SetString(PyExc_TypeError, "micpy does not support Object types");
return -1;
}
else {
npy_intp n = PyMicArray_NBYTES(ret);
#pragma omp target device(ret->device)
memset(PyMicArray_DATA(ret), 0, n);
}
return 0;
}
NPY_NO_EXPORT int
_IsAligned(PyMicArrayObject *ap)
{
unsigned int i;
npy_uintp aligned;
npy_uintp alignment = PyMicArray_DESCR(ap)->alignment;
/* alignment 1 types should have an efficient alignment for copy loops */
if (PyMicArray_ISFLEXIBLE(ap) || PyMicArray_ISSTRING(ap)) {
npy_intp itemsize = PyMicArray_ITEMSIZE(ap);
/* power of two sizes may be loaded in larger moves */
if (((itemsize & (itemsize - 1)) == 0)) {
alignment = itemsize > NPY_MAX_COPY_ALIGNMENT ?
NPY_MAX_COPY_ALIGNMENT : itemsize;
}
else {
/* if not power of two it will be accessed bytewise */
alignment = 1;
}
}
if (alignment == 1) {
return 1;
}
aligned = (npy_uintp)PyMicArray_DATA(ap);
for (i = 0; i < PyMicArray_NDIM(ap); i++) {
#if NPY_RELAXED_STRIDES_CHECKING
/* skip dim == 1 as it is not required to have stride 0 */
if (PyMicArray_DIM(ap, i) > 1) {
/* if shape[i] == 1, the stride is never used */
aligned |= (npy_uintp)PyMicArray_STRIDES(ap)[i];
}
else if (PyMicArray_DIM(ap, i) == 0) {
/* an array with zero elements is always aligned */
return 1;
}
#else /* not NPY_RELAXED_STRIDES_CHECKING */
aligned |= (npy_uintp)PyMicArray_STRIDES(ap)[i];
#endif /* not NPY_RELAXED_STRIDES_CHECKING */
}
return mpy_is_aligned((void *)aligned, alignment);
}
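/*
 * For context, mpy_is_aligned (defined elsewhere in micpy) is assumed to be
 * the usual power-of-two mask test; a minimal sketch, not the actual
 * implementation:
 *
 *   static NPY_INLINE int
 *   mpy_is_aligned(const void *p, npy_uintp alignment)
 *   {
 *       return ((npy_uintp)p & (alignment - 1)) == 0;
 *   }
 *
 * OR-ing the data pointer with all relevant strides above is valid because
 * the OR is aligned to a power-of-two boundary iff every term is.
 */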
NPY_NO_EXPORT npy_bool
_IsWriteable(PyMicArrayObject *ap)
{
PyObject *base=PyMicArray_BASE(ap);
void *dummy;
Py_ssize_t n;
/* If we own our own data, then no-problem */
if ((base == NULL) || (PyMicArray_FLAGS(ap) & NPY_ARRAY_OWNDATA)) {
return NPY_TRUE;
}
/*
* Get to the final base object
* If it is a writeable array, then return TRUE
* If we can find an array object
* or a writeable buffer object as the final base object
* or a string object (for pickling support memory savings).
* - this last could be removed if a proper pickleable
* buffer was added to Python.
*
* MW: I think it would be better to disallow switching from READONLY
* to WRITEABLE like this...
*/
while(PyMicArray_Check(base)) {
if (PyMicArray_CHKFLAGS((PyMicArrayObject *)base, NPY_ARRAY_OWNDATA)) {
return (npy_bool) (PyMicArray_ISWRITEABLE((PyMicArrayObject *)base));
}
base = PyMicArray_BASE((PyMicArrayObject *)base);
}
/*
* here so pickle support works seamlessly
* and unpickled array can be set and reset writeable
* -- could be abused --
*/
if (PyString_Check(base)) {
return NPY_TRUE;
}
if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0) {
return NPY_FALSE;
}
return NPY_TRUE;
}
/*
* check whether arrays with datatype dtype might have object fields. This will
* only happen for structured dtypes (which may have hidden objects even if the
* HASOBJECT flag is false), object dtypes, or subarray dtypes whose base type
* is either of these.
*/
NPY_NO_EXPORT int
_may_have_objects(PyArray_Descr *dtype)
{
PyArray_Descr *base = dtype;
if (PyDataType_HASSUBARRAY(dtype)) {
base = dtype->subarray->base;
}
return (PyDataType_HASFIELDS(base) ||
PyDataType_FLAGCHK(base, NPY_ITEM_HASOBJECT) );
}
/**
* Convert an array shape to a string such as "(1, 2)".
*
* @param n Dimensionality of the shape
* @param vals npy_intp pointer to the shape array
* @param ending String to append after the shape: `(1, 2)%s`.
*
* @return Python unicode string
*/
NPY_NO_EXPORT PyObject *
convert_shape_to_string(npy_intp n, npy_intp *vals, char *ending)
{
npy_intp i;
PyObject *ret, *tmp;
/*
* Negative dimension indicates "newaxis", which can
* be discarded for printing if it's a leading dimension.
* Find the first non-"newaxis" dimension.
*/
for (i = 0; i < n && vals[i] < 0; i++);
if (i == n) {
return PyUString_FromFormat("()%s", ending);
}
else {
ret = PyUString_FromFormat("(%" NPY_INTP_FMT, vals[i++]);
if (ret == NULL) {
return NULL;
}
}
for (; i < n; ++i) {
if (vals[i] < 0) {
tmp = PyUString_FromString(",newaxis");
}
else {
tmp = PyUString_FromFormat(",%" NPY_INTP_FMT, vals[i]);
}
if (tmp == NULL) {
Py_DECREF(ret);
return NULL;
}
PyUString_ConcatAndDel(&ret, tmp);
if (ret == NULL) {
return NULL;
}
}
if (i == 1) {
tmp = PyUString_FromFormat(",)%s", ending);
}
else {
tmp = PyUString_FromFormat(")%s", ending);
}
PyUString_ConcatAndDel(&ret, tmp);
return ret;
}
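/*
 * Example behavior (illustrative): n=2, vals={3,4} yields "(3,4)";
 * n=1, vals={5} yields "(5,)"; a leading negative (newaxis) dimension
 * is skipped, so n=2, vals={-1,7} yields "(7)".
 */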
NPY_NO_EXPORT void
dot_alignment_error(PyMicArrayObject *a, int i, PyMicArrayObject *b, int j)
{
PyObject *errmsg = NULL, *format = NULL, *fmt_args = NULL,
*i_obj = NULL, *j_obj = NULL,
*shape1 = NULL, *shape2 = NULL,
*shape1_i = NULL, *shape2_j = NULL;
format = PyUString_FromString("shapes %s and %s not aligned:"
" %d (dim %d) != %d (dim %d)");
shape1 = convert_shape_to_string(PyMicArray_NDIM(a), PyMicArray_DIMS(a), "");
shape2 = convert_shape_to_string(PyMicArray_NDIM(b), PyMicArray_DIMS(b), "");
i_obj = PyLong_FromLong(i);
j_obj = PyLong_FromLong(j);
shape1_i = PyLong_FromSsize_t(PyMicArray_DIM(a, i));
shape2_j = PyLong_FromSsize_t(PyMicArray_DIM(b, j));
if (!format || !shape1 || !shape2 || !i_obj || !j_obj ||
!shape1_i || !shape2_j) {
goto end;
}
fmt_args = PyTuple_Pack(6, shape1, shape2,
shape1_i, i_obj, shape2_j, j_obj);
if (fmt_args == NULL) {
goto end;
}
errmsg = PyUString_Format(format, fmt_args);
if (errmsg != NULL) {
PyErr_SetObject(PyExc_ValueError, errmsg);
}
else {
PyErr_SetString(PyExc_ValueError, "shapes are not aligned");
}
end:
Py_XDECREF(errmsg);
Py_XDECREF(fmt_args);
Py_XDECREF(format);
Py_XDECREF(i_obj);
Py_XDECREF(j_obj);
Py_XDECREF(shape1);
Py_XDECREF(shape2);
Py_XDECREF(shape1_i);
Py_XDECREF(shape2_j);
}
/* Convert NPY_CASTING to string
* borrow from numpy */
NPY_NO_EXPORT const char *
npy_casting_to_string(NPY_CASTING casting)
{
switch (casting) {
case NPY_NO_CASTING:
return "'no'";
case NPY_EQUIV_CASTING:
return "'equiv'";
case NPY_SAFE_CASTING:
return "'safe'";
case NPY_SAME_KIND_CASTING:
return "'same_kind'";
case NPY_UNSAFE_CASTING:
return "'unsafe'";
default:
return "<unknown>";
}
}
#define GET_DEVICE(ob, val) ((PyMicArray_Check(ob)) ? \
PyMicArray_DEVICE((PyMicArrayObject *)ob) : (val))
NPY_NO_EXPORT int
get_common_device2(PyObject *op1, PyObject *op2)
{
int cpu_device = omp_get_initial_device();
int dev1, dev2;
dev1 = GET_DEVICE(op1, cpu_device);
dev2 = GET_DEVICE(op2, cpu_device);
/* Prefer the current device when the two devices differ */
if (dev1 != dev2) {
return CURRENT_DEVICE;
}
return dev1;
}
NPY_NO_EXPORT int
get_common_device(PyObject **ops, int nop)
{
int i, idevice, cdevice, cpu_device;
PyObject *iop;
cpu_device = omp_get_initial_device();
cdevice = GET_DEVICE(ops[0], cpu_device);
for (i = 1; i < nop; ++i) {
iop = ops[i];
idevice = GET_DEVICE(iop, cpu_device);
/* Fall back to the current device when the devices differ */
if (idevice != cdevice) {
return CURRENT_DEVICE;
}
}
return cdevice;
} |
data.h | /*!
* Copyright (c) 2015 by Contributors
* \file data.h
* \brief The input data structure of xgboost.
* \author Tianqi Chen
*/
#ifndef XGBOOST_DATA_H_
#define XGBOOST_DATA_H_
#include <dmlc/base.h>
#include <dmlc/data.h>
#include <dmlc/serializer.h>
#include <xgboost/base.h>
#include <xgboost/span.h>
#include <xgboost/host_device_vector.h>
#include <memory>
#include <numeric>
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
namespace xgboost {
// forward declare dmatrix.
class DMatrix;
/*! \brief data type accepted by xgboost interface */
enum class DataType : uint8_t {
kFloat32 = 1,
kDouble = 2,
kUInt32 = 3,
kUInt64 = 4,
kStr = 5
};
enum class FeatureType : uint8_t {
kNumerical,
kCategorical
};
/*!
* \brief Meta information about dataset, always sit in memory.
*/
class MetaInfo {
public:
/*! \brief number of data fields in MetaInfo */
static constexpr uint64_t kNumField = 11;
/*! \brief number of rows in the data */
uint64_t num_row_{0}; // NOLINT
/*! \brief number of columns in the data */
uint64_t num_col_{0}; // NOLINT
/*! \brief number of nonzero entries in the data */
uint64_t num_nonzero_{0}; // NOLINT
/*! \brief label of each instance */
HostDeviceVector<bst_float> labels_; // NOLINT
/*!
* \brief the index of begin and end of a group
* needed when the learning task is ranking.
*/
std::vector<bst_group_t> group_ptr_; // NOLINT
/*! \brief weights of each instance, optional */
HostDeviceVector<bst_float> weights_; // NOLINT
/*!
* \brief initialized margins,
* if specified, xgboost will start from this init margin
* can be used to specify initial prediction to boost from.
*/
HostDeviceVector<bst_float> base_margin_; // NOLINT
/*!
* \brief lower bound of the label, to be used for survival analysis (censored regression)
*/
HostDeviceVector<bst_float> labels_lower_bound_; // NOLINT
/*!
* \brief upper bound of the label, to be used for survival analysis (censored regression)
*/
HostDeviceVector<bst_float> labels_upper_bound_; // NOLINT
/*!
* \brief Name of type for each feature provided by users. E.g. "int"/"float"/"i"/"q"
*/
std::vector<std::string> feature_type_names;
/*!
* \brief Name for each feature.
*/
std::vector<std::string> feature_names;
/*
* \brief Type of each feature. Automatically set when feature_type_names is specified.
*/
HostDeviceVector<FeatureType> feature_types;
/*
* \brief Weight of each feature, used to define the probability of each feature being
* selected when using column sampling.
*/
HostDeviceVector<float> feature_weigths;
/*! \brief default constructor */
MetaInfo() = default;
MetaInfo(MetaInfo&& that) = default;
MetaInfo& operator=(MetaInfo&& that) = default;
MetaInfo& operator=(MetaInfo const& that) = delete;
/*!
* \brief Validate all metainfo.
*/
void Validate(int32_t device) const;
MetaInfo Slice(common::Span<int32_t const> ridxs) const;
/*!
* \brief Get weight of each instances.
* \param i Instance index.
* \return The weight.
*/
inline bst_float GetWeight(size_t i) const {
return weights_.Size() != 0 ? weights_.HostVector()[i] : 1.0f;
}
/*! \brief get sorted indexes (argsort) of labels by absolute value (used by cox loss) */
inline const std::vector<size_t>& LabelAbsSort() const {
if (label_order_cache_.size() == labels_.Size()) {
return label_order_cache_;
}
label_order_cache_.resize(labels_.Size());
std::iota(label_order_cache_.begin(), label_order_cache_.end(), 0);
const auto& l = labels_.HostVector();
XGBOOST_PARALLEL_SORT(label_order_cache_.begin(), label_order_cache_.end(),
[&l](size_t i1, size_t i2) {return std::abs(l[i1]) < std::abs(l[i2]);});
return label_order_cache_;
}
/*! \brief clear all the information */
void Clear();
/*!
* \brief Load the Meta info from binary stream.
* \param fi The input stream
*/
void LoadBinary(dmlc::Stream* fi);
/*!
* \brief Save the Meta info to binary stream
* \param fo The output stream.
*/
void SaveBinary(dmlc::Stream* fo) const;
/*!
* \brief Set information in the meta info.
* \param key The key of the information.
* \param dptr The data pointer of the source array.
* \param dtype The type of the source data.
* \param num Number of elements in the source array.
*/
void SetInfo(const char* key, const void* dptr, DataType dtype, size_t num);
/*!
* \brief Set information in the meta info with array interface.
* \param key The key of the information.
* \param interface_str String representation of json format array interface.
*
* [ column_0, column_1, ... column_n ]
*
* Right now only 1 column is permitted.
*/
void SetInfo(const char* key, std::string const& interface_str);
void GetInfo(char const* key, bst_ulong* out_len, DataType dtype,
const void** out_dptr) const;
void SetFeatureInfo(const char *key, const char **info, const bst_ulong size);
void GetFeatureInfo(const char *field, std::vector<std::string>* out_str_vecs) const;
/*
* \brief Extend with other MetaInfo.
*
* \param that The other MetaInfo object.
*
* \param accumulate_rows Whether rows need to be accumulated in this function. If
* client code knows number of rows in advance, set this parameter to false.
*/
void Extend(MetaInfo const& that, bool accumulate_rows);
private:
/*! \brief argsort of labels */
mutable std::vector<size_t> label_order_cache_;
};
/*! \brief Element from a sparse vector */
struct Entry {
/*! \brief feature index */
bst_feature_t index;
/*! \brief feature value */
bst_float fvalue;
/*! \brief default constructor */
Entry() = default;
/*!
* \brief constructor with index and value
* \param index The feature or row index.
* \param fvalue The feature value.
*/
XGBOOST_DEVICE Entry(bst_feature_t index, bst_float fvalue) : index(index), fvalue(fvalue) {}
/*! \brief reversely compare feature values */
inline static bool CmpValue(const Entry& a, const Entry& b) {
return a.fvalue < b.fvalue;
}
inline bool operator==(const Entry& other) const {
return (this->index == other.index && this->fvalue == other.fvalue);
}
};
/*!
* \brief Parameters for constructing batches.
*/
struct BatchParam {
/*! \brief The GPU device to use. */
int gpu_id;
/*! \brief Maximum number of bins per feature for histograms. */
int max_bin{0};
/*! \brief Page size for external memory mode. */
size_t gpu_page_size;
BatchParam() = default;
BatchParam(int32_t device, int32_t max_bin, size_t gpu_page_size = 0)
: gpu_id{device}, max_bin{max_bin}, gpu_page_size{gpu_page_size} {}
inline bool operator!=(const BatchParam& other) const {
return gpu_id != other.gpu_id || max_bin != other.max_bin ||
gpu_page_size != other.gpu_page_size;
}
};
struct HostSparsePageView {
using Inst = common::Span<Entry const>;
common::Span<bst_row_t const> offset;
common::Span<Entry const> data;
Inst operator[](size_t i) const {
auto size = *(offset.data() + i + 1) - *(offset.data() + i);
return {data.data() + *(offset.data() + i),
static_cast<Inst::index_type>(size)};
}
size_t Size() const { return offset.size() == 0 ? 0 : offset.size() - 1; }
};
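// Illustrative CSR indexing for the view above: with offset = {0, 2, 5}
// and data holding 5 entries, Size() == 2, operator[](0) spans
// data[0..2) and operator[](1) spans data[2..5).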
/*!
* \brief In-memory storage unit of sparse batch, stored in CSR format.
*/
class SparsePage {
public:
// Offset for each row.
HostDeviceVector<bst_row_t> offset;
/*! \brief the data of the segments */
HostDeviceVector<Entry> data;
size_t base_rowid {0};
/*! \brief an instance of sparse vector in the batch */
using Inst = common::Span<Entry const>;
HostSparsePageView GetView() const {
return {offset.ConstHostSpan(), data.ConstHostSpan()};
}
/*! \brief constructor */
SparsePage() {
this->Clear();
}
/*! \return Number of instances in the page. */
inline size_t Size() const {
return offset.Size() == 0 ? 0 : offset.Size() - 1;
}
/*! \return estimation of memory cost of this page */
inline size_t MemCostBytes() const {
return offset.Size() * sizeof(size_t) + data.Size() * sizeof(Entry);
}
/*! \brief clear the page */
inline void Clear() {
base_rowid = 0;
auto& offset_vec = offset.HostVector();
offset_vec.clear();
offset_vec.push_back(0);
data.HostVector().clear();
}
/*! \brief Set the base row id for this page. */
inline void SetBaseRowId(size_t row_id) {
base_rowid = row_id;
}
SparsePage GetTranspose(int num_columns) const;
void SortRows() {
auto ncol = static_cast<bst_omp_uint>(this->Size());
dmlc::OMPException exc;
#pragma omp parallel for schedule(dynamic, 1)
for (bst_omp_uint i = 0; i < ncol; ++i) {
exc.Run([&]() {
if (this->offset.HostVector()[i] < this->offset.HostVector()[i + 1]) {
std::sort(
this->data.HostVector().begin() + this->offset.HostVector()[i],
this->data.HostVector().begin() + this->offset.HostVector()[i + 1],
Entry::CmpValue);
}
});
}
exc.Rethrow();
}
/**
* \brief Pushes external data batch onto this page
*
* \tparam AdapterBatchT
* \param batch
* \param missing
* \param nthread
*
* \return The maximum number of columns encountered in this input batch. Useful when pushing many adapter batches to work out the total number of columns.
*/
template <typename AdapterBatchT>
uint64_t Push(const AdapterBatchT& batch, float missing, int nthread);
/*!
* \brief Push a sparse page
* \param batch the row page
*/
void Push(const SparsePage &batch);
/*!
* \brief Push a SparsePage stored in CSC format
* \param batch The row batch to be pushed
*/
void PushCSC(const SparsePage& batch);
};
class CSCPage: public SparsePage {
public:
CSCPage() : SparsePage() {}
explicit CSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class SortedCSCPage : public SparsePage {
public:
SortedCSCPage() : SparsePage() {}
explicit SortedCSCPage(SparsePage page) : SparsePage(std::move(page)) {}
};
class EllpackPageImpl;
/*!
* \brief A page stored in ELLPACK format.
*
* This class uses the PImpl idiom (https://en.cppreference.com/w/cpp/language/pimpl) to avoid
* including CUDA-specific implementation details in the header.
*/
class EllpackPage {
public:
/*!
* \brief Default constructor.
*
* This is used in the external memory case. An empty ELLPACK page is constructed with its content
* set later by the reader.
*/
EllpackPage();
/*!
* \brief Constructor from an existing DMatrix.
*
* This is used in the in-memory case. The ELLPACK page is constructed from an existing DMatrix
* in CSR format.
*/
explicit EllpackPage(DMatrix* dmat, const BatchParam& param);
/*! \brief Destructor. */
~EllpackPage();
EllpackPage(EllpackPage&& that);
/*! \return Number of instances in the page. */
size_t Size() const;
/*! \brief Set the base row id for this page. */
void SetBaseRowId(size_t row_id);
const EllpackPageImpl* Impl() const { return impl_.get(); }
EllpackPageImpl* Impl() { return impl_.get(); }
private:
std::unique_ptr<EllpackPageImpl> impl_;
};
template<typename T>
class BatchIteratorImpl {
public:
virtual ~BatchIteratorImpl() = default;
virtual T& operator*() = 0;
virtual const T& operator*() const = 0;
virtual void operator++() = 0;
virtual bool AtEnd() const = 0;
};
template<typename T>
class BatchIterator {
public:
using iterator_category = std::forward_iterator_tag; // NOLINT
explicit BatchIterator(BatchIteratorImpl<T>* impl) { impl_.reset(impl); }
void operator++() {
CHECK(impl_ != nullptr);
++(*impl_);
}
T& operator*() {
CHECK(impl_ != nullptr);
return *(*impl_);
}
const T& operator*() const {
CHECK(impl_ != nullptr);
return *(*impl_);
}
bool operator!=(const BatchIterator&) const {
CHECK(impl_ != nullptr);
return !impl_->AtEnd();
}
bool AtEnd() const {
CHECK(impl_ != nullptr);
return impl_->AtEnd();
}
private:
std::shared_ptr<BatchIteratorImpl<T>> impl_;
};
template<typename T>
class BatchSet {
public:
explicit BatchSet(BatchIterator<T> begin_iter) : begin_iter_(std::move(begin_iter)) {}
BatchIterator<T> begin() { return begin_iter_; } // NOLINT
BatchIterator<T> end() { return BatchIterator<T>(nullptr); } // NOLINT
private:
BatchIterator<T> begin_iter_;
};
struct XGBAPIThreadLocalEntry;
/*!
* \brief Internal data structured used by XGBoost during training.
*/
class DMatrix {
public:
/*! \brief default constructor */
DMatrix() = default;
/*! \brief meta information of the dataset */
virtual MetaInfo& Info() = 0;
virtual void SetInfo(const char *key, const void *dptr, DataType dtype,
size_t num) {
this->Info().SetInfo(key, dptr, dtype, num);
}
virtual void SetInfo(const char* key, std::string const& interface_str) {
this->Info().SetInfo(key, interface_str);
}
/*! \brief meta information of the dataset */
virtual const MetaInfo& Info() const = 0;
/*! \brief Get thread local memory for returning data from DMatrix. */
XGBAPIThreadLocalEntry& GetThreadLocal() const;
/**
* \brief Gets batches. Use range based for loop over BatchSet to access individual batches.
*/
template<typename T>
BatchSet<T> GetBatches(const BatchParam& param = {});
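// Example (illustrative; p_fmat stands for any DMatrix*):
//   for (auto const& page : p_fmat->GetBatches<SparsePage>()) {
//     auto view = page.GetView();
//     /* ... consume view[i] row spans ... */
//   }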
template <typename T>
bool PageExists() const;
// the following are column meta data, should be able to answer them fast.
/*! \return Whether the data is stored in a single column block. */
virtual bool SingleColBlock() const = 0;
/*! \brief virtual destructor */
virtual ~DMatrix();
/*! \brief Whether the matrix is dense. */
bool IsDense() const {
return Info().num_nonzero_ == Info().num_row_ * Info().num_col_;
}
/*!
* \brief Load DMatrix from URI.
* \param uri The URI of input.
* \param silent Whether print information during loading.
* \param load_row_split Flag to load only this worker's share of the rows, divided among the workers in distributed mode.
* \param file_format The format type of the file, used for dmlc::Parser::Create.
* By default "auto" will be able to load in both local binary file.
* \param page_size Page size for external memory.
* \return The created DMatrix.
*/
static DMatrix* Load(const std::string& uri,
bool silent,
bool load_row_split,
const std::string& file_format = "auto",
size_t page_size = kPageSize);
/**
* \brief Creates a new DMatrix from an external data adapter.
*
* \tparam AdapterT Type of the adapter.
* \param [in,out] adapter View onto an external data.
* \param missing Values to count as missing.
* \param nthread Number of threads for construction.
* \param cache_prefix (Optional) The cache prefix for external memory.
* \param page_size (Optional) Size of the page.
*
* \return The created DMatrix.
*/
template <typename AdapterT>
static DMatrix* Create(AdapterT* adapter, float missing, int nthread,
const std::string& cache_prefix = "",
size_t page_size = kPageSize);
/**
* \brief Create a new Quantile based DMatrix used for histogram based algorithm.
*
* \tparam DataIterHandle External iterator type, defined in C API.
* \tparam DMatrixHandle DMatrix handle, defined in C API.
* \tparam DataIterResetCallback Callback for reset, prototype defined in C API.
* \tparam XGDMatrixCallbackNext Callback for next, prototype defined in C API.
*
* \param iter External data iterator
* \param proxy A handle to ProxyDMatrix
* \param reset Callback for reset
* \param next Callback for next
* \param missing Value that should be treated as missing.
* \param nthread number of threads used for initialization.
* \param max_bin Maximum number of bins.
*
* \return A created quantile based DMatrix.
*/
template <typename DataIterHandle, typename DMatrixHandle,
typename DataIterResetCallback, typename XGDMatrixCallbackNext>
static DMatrix *Create(DataIterHandle iter, DMatrixHandle proxy,
DataIterResetCallback *reset,
XGDMatrixCallbackNext *next, float missing,
int nthread,
int max_bin);
virtual DMatrix *Slice(common::Span<int32_t const> ridxs) = 0;
/*! \brief Number of rows per page in external memory. Approximately 100MB per page for
* dataset with 100 features. */
static const size_t kPageSize = 32UL << 12UL;
protected:
virtual BatchSet<SparsePage> GetRowBatches() = 0;
virtual BatchSet<CSCPage> GetColumnBatches() = 0;
virtual BatchSet<SortedCSCPage> GetSortedColumnBatches() = 0;
virtual BatchSet<EllpackPage> GetEllpackBatches(const BatchParam& param) = 0;
virtual bool EllpackExists() const = 0;
virtual bool SparsePageExists() const = 0;
};
template<>
inline BatchSet<SparsePage> DMatrix::GetBatches(const BatchParam&) {
return GetRowBatches();
}
template<>
inline bool DMatrix::PageExists<EllpackPage>() const {
return this->EllpackExists();
}
template<>
inline bool DMatrix::PageExists<SparsePage>() const {
return this->SparsePageExists();
}
template<>
inline BatchSet<CSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetColumnBatches();
}
template<>
inline BatchSet<SortedCSCPage> DMatrix::GetBatches(const BatchParam&) {
return GetSortedColumnBatches();
}
template<>
inline BatchSet<EllpackPage> DMatrix::GetBatches(const BatchParam& param) {
return GetEllpackBatches(param);
}
} // namespace xgboost
namespace dmlc {
DMLC_DECLARE_TRAITS(is_pod, xgboost::Entry, true);
namespace serializer {
template <>
struct Handler<xgboost::Entry> {
inline static void Write(Stream* strm, const xgboost::Entry& data) {
strm->Write(data.index);
strm->Write(data.fvalue);
}
inline static bool Read(Stream* strm, xgboost::Entry* data) {
return strm->Read(&data->index) && strm->Read(&data->fvalue);
}
};
} // namespace serializer
} // namespace dmlc
#endif // XGBOOST_DATA_H_
|
GB_unop__bnot_int16_int16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__bnot_int16_int16
// op(A') function: GB_unop_tran__bnot_int16_int16
// C type: int16_t
// A type: int16_t
// cast: int16_t cij = aij
// unaryop: cij = ~(aij)
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ~(x) ;
// casting
#define GB_CAST(z, aij) \
int16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = aij ; \
Cx [pC] = ~(z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BNOT || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__bnot_int16_int16
(
int16_t *Cx, // Cx and Ax may be aliased
const int16_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int16_t aij = Ax [p] ;
int16_t z = aij ;
Cx [p] = ~(z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int16_t aij = Ax [p] ;
int16_t z = aij ;
Cx [p] = ~(z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__bnot_int16_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
module_bl_mynn_mynn_tendencies_impl.h | #ifndef __MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_H__
#define __MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_H__
// File granularity version.
#ifndef MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_VERSION_MAJOR
#define MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_VERSION_MAJOR 1
#endif
#ifndef MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_VERSION_MINOR
#define MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_VERSION_MINOR 0
#endif
#ifndef MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_PATCH_VERSION
#define MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_PATCH_VERSION 0
#endif
#ifndef MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_CREATE_DATE
#define MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_CREATE_DATE "Date: 13-11-2016 , Time: 11:18 AM GMT+2"
#endif
// Set this value to successful build date/time.
#ifndef MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_BUILD_DATE
#define MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_BUILD_DATE ""
#endif
#ifndef MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_AUTHOR
#define MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_AUTHOR "Name: Bernard Gingold , e-mail: beniekg@gmail.com"
#endif
#include "module_bl_mynn_F90_iface.h"
#include "PhysLib_Config.h"
#include "std_headers.h"
namespace wrf_phys_wrappers {
namespace module_bl_mynn {
template<typename R32 = float,
typename I32 = int > struct Wrap_Mynn_Tendencies {
/****************************************
Constructors and Destructor.
*****************************************/
/*
@Purpose:
Default Constructor - explicitly default.
*/
Wrap_Mynn_Tendencies() = default;
/*
@Purpose:
1st 'main' Constructor which purpose
is to allocate and initialize scalar
and array members. Array members are
zero-filled. Caller must later initialize
input arrays to correct physical state.
*/
Wrap_Mynn_Tendencies(_In_ const I32 kts,
_In_ const I32 kte,
_In_ const I32 grav_settling,
_In_ const I32 levflag,
_In_ const I32 bl_mynn_cloudmix,
_In_ const I32 bl_mynn_mixqt,
_In_ const I32 bl_mynn_edmf,
_In_ const I32 bl_mynn_edmf_mom,
_In_ const I32 FLAG_QI,
_In_ const I32 FLAG_QNI,
_In_ const I32 FLAG_QC,
_In_ const I32 FLAG_QNC,
_In_ const R32 delt,
_In_ const R32 ust,
_In_ const R32 flt,
_In_ const R32 flq,
_In_ const R32 flqv,
_In_ const R32 flqc,
_In_ const R32 wspd,
_In_ const R32 uoce,
_In_ const R32 voce,
_In_ const R32 qcg,
_In_ const R32 vdfg1)
:
m_kts{ kts },
m_kte{ kte },
m_grav_settling{ grav_settling },
m_levflag{ levflag },
m_bl_mynn_cloudmix{ bl_mynn_cloudmix },
m_bl_mynn_mixqt{ bl_mynn_mixqt },
m_bl_mynn_edmf{ bl_mynn_edmf },
m_bl_mynn_edmf_mom{ bl_mynn_edmf_mom },
m_FLAG_QI{ FLAG_QI },
m_FLAG_QNI{ FLAG_QNI },
m_FLAG_QC{ FLAG_QC },
m_FLAG_QNC{ FLAG_QNC },
m_delt{ delt },
m_ust{ ust },
m_flt{ flt },
m_flq{ flq },
m_flqv{ flqv },
m_flqc{ flqc },
m_wspd{ wspd },
m_uoce{ uoce },
m_voce{ voce },
m_qcg{ qcg },
m_vdfg1{ vdfg1 },
m_s_aw{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awthl{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awqt{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awqv{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awqc{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awu{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awv{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_u{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_v{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_th{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_tk{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qv{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qi{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qni{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qnc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_p{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_exner{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dfq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dz{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_tsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_cov{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_tcd{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qcd{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_cldfra_bl1D{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_thl{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_sqw{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_sqv{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_sqc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_sqi{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dfm{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dfh{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_du{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dv{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dth{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dqv{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dqc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dqi{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dqni{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) } {
if (0 > (m_kte - m_kts)) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Invalid array size 1st Ctor: 'Wrap_Mynn_Tendencies'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Lower range value m_kts: " << m_kts << "\n";
std::cerr << "Upper range value m_kte: " << m_kte << "\n";
std::cerr << "Range value difference: " << m_kte - m_kts << "\n";
std::cerr << "Cannot recover --> calling exit(-1)!!\n";
std::exit(-1);
}
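// NOTE: the loops below address the consecutive R32* members starting at
// m_s_aw as a flat array of m_totArrays pointers via (&this->m_s_aw)[i].
// This assumes the pointer members are laid out contiguously without
// padding, which the C++ standard does not guarantee; it holds on the
// compilers this wrapper targets but is worth keeping in mind.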
for (int i{ 0 }; i != this->m_totArrays; ++i) {
if ((&this->m_s_aw)[i] == NULL) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 1st Ctor: 'Wrap_Mynn_Tendencies'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_s_aw)[i] << "\n";
std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
std::exit(-1);
}
}
#if defined (USE_ICL_OPENMP) && \
OPENMP_CURR_VER >= 40
#pragma omp parallel for if(m_kte >= (1 << 16))
for (int idx = 0; idx < this->m_totArrays; ++idx) {
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
for (int i = m_kts; i != m_kte; ++i) {
(&this->m_s_aw)[idx][i] = 0.f;
}
}
// the staggered s_aw* arrays hold m_kte+1 elements, so the last valid
// index is m_kte (indexing m_kte+1 would overrun the allocation)
const int top = m_kte;
(&this->m_s_aw)[0][top] = 0.f;
(&this->m_s_aw)[1][top] = 0.f;
(&this->m_s_aw)[2][top] = 0.f;
(&this->m_s_aw)[3][top] = 0.f;
(&this->m_s_aw)[4][top] = 0.f;
(&this->m_s_aw)[5][top] = 0.f;
(&this->m_s_aw)[6][top] = 0.f;
#else
// You must not #undef 'USE_AUTO_VECTORIZATION' macro!!
#if defined (USE_AUTO_VECTORIZATION)
for (int idx = 0; idx < this->m_totArrays; ++idx) {
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
for (int i = m_kts; i != m_kte; ++i) {
(&this->m_s_aw)[idx][i] = 0.f;
}
}
#endif
// as above, the last valid index of the staggered arrays is m_kte
const int top = m_kte;
(&this->m_s_aw)[0][top] = 0.f;
(&this->m_s_aw)[1][top] = 0.f;
(&this->m_s_aw)[2][top] = 0.f;
(&this->m_s_aw)[3][top] = 0.f;
(&this->m_s_aw)[4][top] = 0.f;
(&this->m_s_aw)[5][top] = 0.f;
(&this->m_s_aw)[6][top] = 0.f;
#endif
}
/*
@Purpose:
2nd 'main' Constructor which purpose
is to allocate and initialize scalar
and array members. Array output members are
zero-filled. Caller must pass initialized
input arrays to correct physical state.
*/
Wrap_Mynn_Tendencies(_In_ const I32 kts,
_In_ const I32 kte,
_In_ const I32 grav_settling,
_In_ const I32 levflag,
_In_ const I32 bl_mynn_cloudmix,
_In_ const I32 bl_mynn_mixqt,
_In_ const I32 bl_mynn_edmf,
_In_ const I32 bl_mynn_edmf_mom,
_In_ const I32 FLAG_QI,
_In_ const I32 FLAG_QNI,
_In_ const I32 FLAG_QC,
_In_ const I32 FLAG_QNC,
_In_ const R32 delt,
_In_ const R32 ust,
_In_ const R32 flt,
_In_ const R32 flq,
_In_ const R32 flqv,
_In_ const R32 flqc,
_In_ const R32 wspd,
_In_ const R32 uoce,
_In_ const R32 voce,
_In_ const R32 qcg,
_In_ const R32 vdfg1,
_In_ R32* __restrict const s_aw,
_In_ R32* __restrict const s_awthl,
_In_ R32* __restrict const s_awqt,
_In_ R32* __restrict const s_awqv,
_In_ R32* __restrict const s_awqc,
_In_ R32* __restrict const s_awu,
_In_ R32* __restrict const s_awv,
_In_ R32* __restrict const u,
_In_ R32* __restrict const v,
_In_ R32* __restrict const th,
_In_ R32* __restrict const tk,
_In_ R32* __restrict const qv,
_In_ R32* __restrict const qc,
_In_ R32* __restrict const qi,
_In_ R32* __restrict const qni,
_In_ R32* __restrict const qnc,
_In_ R32* __restrict const p,
_In_ R32* __restrict const exner,
_In_ R32* __restrict const dfq,
_In_ R32* __restrict const dz,
_In_ R32* __restrict const tsq,
_In_ R32* __restrict const qsq,
_In_ R32* __restrict const cov,
_In_ R32* __restrict const tcd,
_In_ R32* __restrict const qcd,
_In_ R32* __restrict const cldfra_bl1D,
_In_ R32* __restrict const thl,
_In_ R32* __restrict const sqw,
_In_ R32* __restrict const sqv,
_In_ R32* __restrict const sqc,
_In_ R32* __restrict const sqi,
_In_ R32* __restrict const dfm,
_In_ R32* __restrict const dfh,
_In_ R32* __restrict const du,
_In_ R32* __restrict const dv,
_In_ R32* __restrict const dth,
_In_ R32* __restrict const dqv,
_In_ R32* __restrict const dqc,
_In_ R32* __restrict const dqi,
_In_ R32* __restrict const dqni)
:
m_kts{ kts },
m_kte{ kte },
m_grav_settling{ grav_settling },
m_levflag{ levflag },
m_bl_mynn_cloudmix{ bl_mynn_cloudmix },
m_bl_mynn_mixqt{ bl_mynn_mixqt },
m_bl_mynn_edmf{ bl_mynn_edmf },
m_bl_mynn_edmf_mom{ bl_mynn_edmf_mom },
m_FLAG_QI{ FLAG_QI },
m_FLAG_QNI{ FLAG_QNI },
m_FLAG_QC{ FLAG_QC },
m_FLAG_QNC{ FLAG_QNC },
m_delt{ delt },
m_ust{ ust },
m_flt{ flt },
m_flq{ flq },
m_flqv{ flqv },
m_flqc{ flqc },
m_wspd{ wspd },
m_uoce{ uoce },
m_voce{ voce },
m_qcg{ qcg },
m_vdfg1{ vdfg1 },
m_s_aw{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awthl{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awqt{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awqv{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awqc{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awu{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awv{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_u{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_v{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_th{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_tk{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qv{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qi{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qni{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qnc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_p{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_exner{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dfq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dz{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_tsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_cov{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_tcd{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qcd{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_cldfra_bl1D{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_thl{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_sqw{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_sqv{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_sqc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_sqi{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dfm{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dfh{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_du{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dv{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dth{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dqv{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dqc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dqi{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dqni{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) } {
if (0 > (m_kte - m_kts)) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Invalid array size 2nd Ctor: 'Wrap_Mynn_Tendencies'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Lower range value m_kts: " << m_kts << "\n";
std::cerr << "Upper range value m_kte: " << m_kte << "\n";
std::cerr << "Range value difference: " << m_kte - m_kts << "\n";
std::cerr << "Cannot recover --> calling exit(-1)!!\n";
std::exit(-1);
}
for (int i{ 0 }; i != this->m_totArrays; ++i) {
if ((&this->m_s_aw)[i] == NULL) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 2nd Ctor: 'Wrap_Mynn_Tendencies'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_s_aw)[i] << "\n";
std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
std::exit(-1);
}
}
if (s_aw == NULL ||
s_awthl == NULL ||
s_awqt == NULL ||
s_awqv == NULL ||
s_awqc == NULL ||
s_awu == NULL ||
s_awv == NULL ||
u == NULL ||
v == NULL ||
th == NULL ||
tk == NULL ||
qv == NULL ||
qc == NULL ||
qi == NULL ||
qni == NULL ||
qnc == NULL ||
p == NULL ||
exner == NULL ||
dfq == NULL ||
dz == NULL ||
tsq == NULL ||
qsq == NULL ||
cov == NULL ||
tcd == NULL ||
qcd == NULL ||
cldfra_bl1D == NULL ||
thl == NULL ||
sqw == NULL ||
sqv == NULL ||
sqc == NULL ||
sqi == NULL ||
dfm == NULL ||
dfh == NULL ||
du == NULL ||
dv == NULL ||
dth == NULL ||
dqv == NULL ||
dqc == NULL ||
dqi == NULL ||
dqni == NULL) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 2nd Ctor: 'Wrap_Mynn_Tendencies'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "One or more caller's arrays contains invalid pointer!!\n";
std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
std::exit(-1);
}
#if defined (USE_ICL_OPENMP) && \
OPENMP_CURR_VER >= 40
#pragma omp parallel for if(m_kte >= (1 << 16))
for (int i{ 0 }; i != m_kte; ++i) {
m_s_aw[i] = s_aw[i];
m_s_awthl[i] = s_awthl[i];
m_s_awqt[i] = s_awqt[i];
m_s_awqv[i] = s_awqv[i];
m_s_awqc[i] = s_awqc[i];
m_s_awu[i] = s_awu[i];
m_s_awv[i] = s_awv[i];
m_u[i] = u[i];
m_v[i] = v[i];
m_th[i] = th[i];
m_tk[i] = tk[i];
m_qv[i] = qv[i];
m_qc[i] = qc[i];
m_qi[i] = qi[i];
m_qni[i] = qni[i];
m_qnc[i] = qnc[i];
m_p[i] = p[i];
m_exner[i] = exner[i];
m_dfq[i] = dfq[i];
m_dz[i] = dz[i];
m_tsq[i] = tsq[i];
m_qsq[i] = qsq[i];
m_cov[i] = cov[i];
m_tcd[i] = tcd[i];
m_qcd[i] = qcd[i];
m_cldfra_bl1D[i] = cldfra_bl1D[i];
m_thl[i] = thl[i];
m_sqw[i] = sqw[i];
m_sqv[i] = sqv[i];
m_sqc[i] = sqc[i];
m_sqi[i] = sqi[i];
m_dfm[i] = dfm[i];
m_dfh[i] = dfh[i];
m_du[i] = du[i];
m_dv[i] = dv[i];
m_dth[i] = dth[i];
m_dqv[i] = dqv[i];
m_dqc[i] = dqc[i];
m_dqi[i] = dqi[i];
m_dqni[i] = dqni[i];
}
const int top = m_kte; // last valid index of the (m_kte + 1)-element updraft arrays
m_s_aw[top] = s_aw[top];
m_s_awthl[top] = s_awthl[top];
m_s_awqt[top] = s_awqt[top];
m_s_awqv[top] = s_awqv[top];
m_s_awqc[top] = s_awqc[top];
m_s_awu[top] = s_awu[top];
m_s_awv[top] = s_awv[top];
#else
// You must not #undef 'USE_AUTO_VECTORIZATION' macro!
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
for (int i{ 0 }; i != m_kte; ++i) {
m_s_aw[i] = s_aw[i];
m_s_awthl[i] = s_awthl[i];
m_s_awqt[i] = s_awqt[i];
m_s_awqv[i] = s_awqv[i];
m_s_awqc[i] = s_awqc[i];
m_s_awu[i] = s_awu[i];
m_s_awv[i] = s_awv[i];
m_u[i] = u[i];
m_v[i] = v[i];
m_th[i] = th[i];
m_tk[i] = tk[i];
m_qv[i] = qv[i];
m_qc[i] = qc[i];
m_qi[i] = qi[i];
m_qni[i] = qni[i];
m_qnc[i] = qnc[i];
m_p[i] = p[i];
m_exner[i] = exner[i];
m_dfq[i] = dfq[i];
m_dz[i] = dz[i];
m_tsq[i] = tsq[i];
m_qsq[i] = qsq[i];
m_cov[i] = cov[i];
m_tcd[i] = tcd[i];
m_qcd[i] = qcd[i];
m_cldfra_bl1D[i] = cldfra_bl1D[i];
m_thl[i] = thl[i];
m_sqw[i] = sqw[i];
m_sqv[i] = sqv[i];
m_sqc[i] = sqc[i];
m_sqi[i] = sqi[i];
m_dfm[i] = dfm[i];
m_dfh[i] = dfh[i];
m_du[i] = du[i];
m_dv[i] = dv[i];
m_dth[i] = dth[i];
m_dqv[i] = dqv[i];
m_dqc[i] = dqc[i];
m_dqi[i] = dqi[i];
m_dqni[i] = dqni[i];
}
const int top = m_kte; // last valid index of the (m_kte + 1)-element updraft arrays
m_s_aw[top] = s_aw[top];
m_s_awthl[top] = s_awthl[top];
m_s_awqt[top] = s_awqt[top];
m_s_awqv[top] = s_awqv[top];
m_s_awqc[top] = s_awqc[top];
m_s_awu[top] = s_awu[top];
m_s_awv[top] = s_awv[top];
#endif
#endif
}
/*
@Purpose:
Copy Constructor implements deep copy semantics.
*/
Wrap_Mynn_Tendencies(_In_ const Wrap_Mynn_Tendencies &x)
:
m_kts{ x.m_kts },
m_kte{ x.m_kte },
m_grav_settling{ x.m_grav_settling },
m_levflag{ x.m_levflag },
m_bl_mynn_cloudmix{ x.m_bl_mynn_cloudmix },
m_bl_mynn_mixqt{ x.m_bl_mynn_mixqt },
m_bl_mynn_edmf{ x.m_bl_mynn_edmf },
m_bl_mynn_edmf_mom{ x.m_bl_mynn_edmf_mom },
m_FLAG_QI{ x.m_FLAG_QI },
m_FLAG_QNI{ x.m_FLAG_QNI },
m_FLAG_QC{ x.m_FLAG_QC },
m_FLAG_QNC{ x.m_FLAG_QNC },
m_delt{ x.m_delt },
m_ust{ x.m_ust },
m_flt{ x.m_flt },
m_flq{ x.m_flq },
m_flqv{ x.m_flqv },
m_flqc{ x.m_flqc },
m_wspd{ x.m_wspd },
m_uoce{ x.m_uoce },
m_voce{ x.m_voce },
m_qcg{ x.m_qcg },
m_vdfg1{ x.m_vdfg1 },
m_s_aw{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awthl{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awqt{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awqv{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awqc{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awu{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_s_awv{ reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)), align32B)) },
m_u{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_v{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_th{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_tk{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qv{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qi{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qni{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qnc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_p{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_exner{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dfq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dz{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_tsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qsq{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_cov{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_tcd{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_qcd{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_cldfra_bl1D{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_thl{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_sqw{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_sqv{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_sqc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_sqi{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dfm{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dfh{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_du{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dv{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dth{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dqv{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dqc{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dqi{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) },
m_dqni{ reinterpret_cast<R32*>(_mm_malloc((m_kte * sizeof(R32)), align32B)) } {
for (int i{ 0 }; i != this->m_totArrays; ++i) {
if ((&this->m_s_aw)[i] == NULL) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in Copy-Ctor: 'Wrap_Mynn_Tendencies'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_s_aw)[i] << "\n";
std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
std::exit(-1);
}
}
#if defined (USE_ICL_OPENMP) && \
OPENMP_CURR_VER >= 40
#pragma omp parallel for if(m_kte >= (1 << 16))
for (int idx = 0; idx != this->m_totArrays; ++idx) {
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
for (int i = m_kts; i != m_kte; ++i) {
(&this->m_s_aw)[idx][i] = (&x.m_s_aw)[idx][i];
}
}
const int top = m_kte; // last valid index of the (m_kte + 1)-element updraft arrays
(&this->m_s_aw)[0][top] = (&x.m_s_aw)[0][top];
(&this->m_s_aw)[1][top] = (&x.m_s_aw)[1][top];
(&this->m_s_aw)[2][top] = (&x.m_s_aw)[2][top];
(&this->m_s_aw)[3][top] = (&x.m_s_aw)[3][top];
(&this->m_s_aw)[4][top] = (&x.m_s_aw)[4][top];
(&this->m_s_aw)[5][top] = (&x.m_s_aw)[5][top];
(&this->m_s_aw)[6][top] = (&x.m_s_aw)[6][top];
#else
// You must not #undef 'USE_AUTO_VECTORIZATION' macro !!
#if defined (USE_AUTO_VECTORIZATION)
for (int idx = 0; idx != this->m_totArrays; ++idx) {
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
for (int i = m_kts; i != m_kte; ++i) {
(&this->m_s_aw)[idx][i] = (&x.m_s_aw)[idx][i];
}
}
const int top = m_kte; // last valid index of the (m_kte + 1)-element updraft arrays
(&this->m_s_aw)[0][top] = (&x.m_s_aw)[0][top];
(&this->m_s_aw)[1][top] = (&x.m_s_aw)[1][top];
(&this->m_s_aw)[2][top] = (&x.m_s_aw)[2][top];
(&this->m_s_aw)[3][top] = (&x.m_s_aw)[3][top];
(&this->m_s_aw)[4][top] = (&x.m_s_aw)[4][top];
(&this->m_s_aw)[5][top] = (&x.m_s_aw)[5][top];
(&this->m_s_aw)[6][top] = (&x.m_s_aw)[6][top];
#endif
#endif
}
/*
@Purpose:
Move Constructor implements shallow copy semantics.
*/
Wrap_Mynn_Tendencies(_In_ Wrap_Mynn_Tendencies &&x)
:
m_kts{ x.m_kts },
m_kte{ x.m_kte },
m_grav_settling{ x.m_grav_settling },
m_levflag{ x.m_levflag },
m_bl_mynn_cloudmix{ x.m_bl_mynn_cloudmix },
m_bl_mynn_mixqt{ x.m_bl_mynn_mixqt },
m_bl_mynn_edmf{ x.m_bl_mynn_edmf },
m_bl_mynn_edmf_mom{ x.m_bl_mynn_edmf_mom },
m_FLAG_QI{ x.m_FLAG_QI },
m_FLAG_QNI{ x.m_FLAG_QNI },
m_FLAG_QC{ x.m_FLAG_QC },
m_FLAG_QNC{ x.m_FLAG_QNC },
m_delt{ x.m_delt },
m_ust{ x.m_ust },
m_flt{ x.m_flt },
m_flq{ x.m_flq },
m_flqv{ x.m_flqv },
m_flqc{ x.m_flqc },
m_wspd{ x.m_wspd },
m_uoce{ x.m_uoce },
m_voce{ x.m_voce },
m_qcg{ x.m_qcg },
m_vdfg1{ x.m_vdfg1 } {
for (int i{ 0 }; i != this->m_totArrays; ++i) {
(&this->m_s_aw)[i] = (&x.m_s_aw)[i];
}
for (int i{ 0 }; i != x.m_totArrays; ++i) {
(&x.m_s_aw)[i] = NULL;
}
x.m_kts = 0;
x.m_kte = 0;
}
/*
@Purpose:
Class Destructor.
*/
~Wrap_Mynn_Tendencies() {
for (int i{ 0 }; i != this->m_totArrays; ++i) {
if ((&this->m_s_aw)[i]) {
_mm_free((&this->m_s_aw)[i]);
}
}
for (int i{ 0 }; i != this->m_totArrays; ++i) {
(&this->m_s_aw)[i] = NULL;
}
m_kts = 0;
m_kte = 0;
}
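/*
@Note:
Illustrative sketch only -- not used by this class. The manual
_mm_malloc / _mm_free pairing above could equivalently be expressed
with std::unique_ptr and a custom deleter (requires <memory>); the
helper names below are hypothetical and serve only to document the
ownership pattern.

    struct MMFree {
        void operator()(R32* p) const noexcept { _mm_free(p); }
    };
    using AlignedBuf = std::unique_ptr<R32[], MMFree>;

    static AlignedBuf make_aligned(const std::size_t n) {
        return AlignedBuf{ reinterpret_cast<R32*>(
            _mm_malloc(n * sizeof(R32), align32B)) };
    }
*/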
/*
@Purpose:
Copy-assign Operator implements deep copy semantics.
*/
Wrap_Mynn_Tendencies & operator=(_In_ const Wrap_Mynn_Tendencies &x) {
if (this == &x) return (*this);
m_kts = x.m_kts;
m_kte = x.m_kte;
m_grav_settling = x.m_grav_settling;
m_levflag = x.m_levflag;
m_bl_mynn_cloudmix = x.m_bl_mynn_cloudmix;
m_bl_mynn_mixqt = x.m_bl_mynn_mixqt;
m_bl_mynn_edmf = x.m_bl_mynn_edmf;
m_bl_mynn_edmf_mom = x.m_bl_mynn_edmf_mom;
m_FLAG_QI = x.m_FLAG_QI;
m_FLAG_QNI = x.m_FLAG_QNI;
m_FLAG_QC = x.m_FLAG_QC;
m_FLAG_QNC = x.m_FLAG_QNC;
m_delt = x.m_delt;
m_ust = x.m_ust;
m_flt = x.m_flt;
m_flq = x.m_flq;
m_flqv = x.m_flqv;
m_flqc = x.m_flqc;
m_wspd = x.m_wspd;
m_uoce = x.m_uoce;
m_voce = x.m_voce;
m_qcg = x.m_qcg;
m_vdfg1 = x.m_vdfg1;
R32 *tPtrs1D[m_totArrays] = {};
for (int i{ 0 }; i != this->m_totArrays; ++i) {
tPtrs1D[i] = reinterpret_cast<R32*>(_mm_malloc(((m_kte + 1) * sizeof(R32)),align32B));
}
for (int i{ 0 }; i != this->m_totArrays; ++i) {
if (tPtrs1D[i] == NULL) {
std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in Copy Operator: 'Wrap_Mynn_Tendencies'!!\n";
std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
std::cerr << "***** ERROR-DETAILS ***** \n";
std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << tPtrs1D[i] << "\n";
std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
std::exit(-1);
}
}
#if defined (USE_ICL_OPENMP) && \
OPENMP_CURR_VER >= 40
#pragma omp parallel for if(m_kte >= (1 << 16))
for (int idx = 0; idx != this->m_totArrays; ++idx) {
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
for (int i = m_kts; i != m_kte; ++i) {
tPtrs1D[idx][i] = (&x.m_s_aw)[idx][i];
}
}
const int top = m_kte; // last valid index of the (m_kte + 1)-element updraft arrays
tPtrs1D[0][top] = x.m_s_aw[top];
tPtrs1D[1][top] = x.m_s_awthl[top];
tPtrs1D[2][top] = x.m_s_awqt[top];
tPtrs1D[3][top] = x.m_s_awqv[top];
tPtrs1D[4][top] = x.m_s_awqc[top];
tPtrs1D[5][top] = x.m_s_awu[top];
tPtrs1D[6][top] = x.m_s_awv[top];
for (int i {0}; i != this->m_totArrays; ++i) {
_mm_free((&this->m_s_aw)[i]);
}
for (int i{ 0 }; i != this->m_totArrays; ++i) {
(&this->m_s_aw)[i] = tPtrs1D[i];
}
return (*this);
#else
#if defined (USE_AUTO_VECTORIZATION)
for (int idx = 0; idx != this->m_totArrays; ++idx) {
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
for (int i = m_kts; i != m_kte; ++i) {
tPtrs1D[idx][i] = (&x.m_s_aw)[idx][i];
}
}
const int top = m_kte; // last valid index of the (m_kte + 1)-element updraft arrays
tPtrs1D[0][top] = x.m_s_aw[top];
tPtrs1D[1][top] = x.m_s_awthl[top];
tPtrs1D[2][top] = x.m_s_awqt[top];
tPtrs1D[3][top] = x.m_s_awqv[top];
tPtrs1D[4][top] = x.m_s_awqc[top];
tPtrs1D[5][top] = x.m_s_awu[top];
tPtrs1D[6][top] = x.m_s_awv[top];
for (int i{ 0 }; i != this->m_totArrays; ++i) {
_mm_free((&this->m_s_aw)[i]);
}
for (int i{ 0 }; i != this->m_totArrays; ++i) {
(&this->m_s_aw)[i] = tPtrs1D[i];
}
return (*this);
#else
#error "USE_AUTO_VECTORIZATION must be defined: otherwise operator= falls off the end without returning."
#endif
#endif
}
/*
@Purpose:
Move-assign Operator implements shallow copy semantics.
*/
Wrap_Mynn_Tendencies & operator=(_In_ Wrap_Mynn_Tendencies &&x) {
if (this == &x) return (*this);
m_kts = x.m_kts;
m_kte = x.m_kte;
m_grav_settling = x.m_grav_settling;
m_levflag = x.m_levflag;
m_bl_mynn_cloudmix = x.m_bl_mynn_cloudmix;
m_bl_mynn_mixqt = x.m_bl_mynn_mixqt;
m_bl_mynn_edmf = x.m_bl_mynn_edmf;
m_bl_mynn_edmf_mom = x.m_bl_mynn_edmf_mom;
m_FLAG_QI = x.m_FLAG_QI;
m_FLAG_QNI = x.m_FLAG_QNI;
m_FLAG_QC = x.m_FLAG_QC;
m_FLAG_QNC = x.m_FLAG_QNC;
m_delt = x.m_delt;
m_ust = x.m_ust;
m_flt = x.m_flt;
m_flq = x.m_flq;
m_flqv = x.m_flqv;
m_flqc = x.m_flqc;
m_wspd = x.m_wspd;
m_uoce = x.m_uoce;
m_voce = x.m_voce;
m_qcg = x.m_qcg;
m_vdfg1 = x.m_vdfg1;
for (int i{ 0 }; i != this->m_totArrays; ++i) {
if ((&this->m_s_aw)[i]) {
_mm_free((&this->m_s_aw)[i]);
}
}
for (int i{ 0 }; i != this->m_totArrays; ++i) {
(&this->m_s_aw)[i] = (&x.m_s_aw)[i];
}
for (int i{ 0 }; i != this->m_totArrays; ++i) {
(&x.m_s_aw)[i] = NULL;
}
x.m_kts = 0;
x.m_kte = 0;
return (*this);
}
/*
@Purpose:
Call Fortran 90 'MYNN_TENDENCIES' subroutine.
*/
void Call_Mynn_Tendencies() {
MODULE_BL_MYNN_mp_MYNN_TENDENCIES(&this->m_kts, &this->m_kte,
&this->m_levflag, &this->m_grav_settling,
&this->m_delt, &this->m_dz[0],
&this->m_u[0], &this->m_v[0], &this->m_th[0], &this->m_tk[0],
&this->m_qv[0], &this->m_qc[0], &this->m_qi[0], &this->m_qni[0],
&this->m_qnc[0], &this->m_p[0], &this->m_exner[0],
&this->m_thl[0], &this->m_sqv[0], &this->m_sqc[0], &this->m_sqi[0],
&this->m_sqw[0], &this->m_ust, &this->m_flt, &this->m_flq, &this->m_flqv,
&this->m_flqc, &this->m_wspd, &this->m_qcg, &this->m_uoce, &this->m_voce,
&this->m_tsq[0], &this->m_qsq[0], &this->m_cov[0],
&this->m_tcd[0], &this->m_qcd[0], &this->m_dfm[0], &this->m_dfh[0],
&this->m_dfq[0], &this->m_du[0], &this->m_dv[0], &this->m_dqv[0],
&this->m_dqc[0], &this->m_dqi[0], &this->m_dqni[0], &this->m_vdfg1,
&this->m_s_aw[0], &this->m_s_awthl[0], &this->m_s_awqt[0], &this->m_s_awqv[0],
&this->m_s_awqc[0], &this->m_s_awu[0], &this->m_s_awv[0],
&this->m_FLAG_QI, &this->m_FLAG_QNI, &this->m_FLAG_QC, &this->m_FLAG_QNC,
&this->m_cldfra_bl1D[0], &this->m_bl_mynn_cloudmix,
&this->m_bl_mynn_mixqt, &this->m_bl_mynn_edmf, &this->m_bl_mynn_edmf_mom);
}
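/*
@Example:
Illustrative sketch only. 'kts', 'kte' and the caller-side vectors are
hypothetical, and '...' elides the remaining constructor arguments,
which follow the parameter order of the 2nd Ctor. Per the _Field_size_
annotations below, the s_aw-family arrays must hold (kte + 1) elements
and all remaining arrays kte elements.

    std::vector<R32> s_aw(kte + 1, 0.f); // ... one vector per array
    std::vector<R32> dqni(kte, 0.f);
    Wrap_Mynn_Tendencies wrap{ kts, kte, // scalars ...
                               s_aw.data(), /* ... */ dqni.data() };
    wrap.Call_Mynn_Tendencies(); // runs the Fortran kernel on the copies
    // results are then read back from wrap.m_du, wrap.m_dv, ... members
*/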
/*
@Purpose:
Member variables:
*/
// Scalar members!
I32 m_kts;
I32 m_kte;
I32 m_grav_settling;
I32 m_levflag;
I32 m_bl_mynn_cloudmix;
I32 m_bl_mynn_mixqt;
I32 m_bl_mynn_edmf;
I32 m_bl_mynn_edmf_mom;
I32 m_FLAG_QI;
I32 m_FLAG_QNI;
I32 m_FLAG_QC;
I32 m_FLAG_QNC;
R32 m_delt;
R32 m_ust;
R32 m_flt;
R32 m_flq;
R32 m_flqv;
R32 m_flqc;
R32 m_wspd;
R32 m_uoce;
R32 m_voce;
R32 m_qcg;
R32 m_vdfg1;
// Array 1D members!
// Input arrays1D
_Field_size_(m_kte + 1) R32* __restrict m_s_aw;
_Field_size_(m_kte + 1) R32* __restrict m_s_awthl;
_Field_size_(m_kte + 1) R32* __restrict m_s_awqt;
_Field_size_(m_kte + 1) R32* __restrict m_s_awqv;
_Field_size_(m_kte + 1) R32* __restrict m_s_awqc;
_Field_size_(m_kte + 1) R32* __restrict m_s_awu;
_Field_size_(m_kte + 1) R32* __restrict m_s_awv;
_Field_size_(m_kte) R32* __restrict m_u;
_Field_size_(m_kte) R32* __restrict m_v;
_Field_size_(m_kte) R32* __restrict m_th;
_Field_size_(m_kte) R32* __restrict m_tk;
_Field_size_(m_kte) R32* __restrict m_qv;
_Field_size_(m_kte) R32* __restrict m_qc;
_Field_size_(m_kte) R32* __restrict m_qi;
_Field_size_(m_kte) R32* __restrict m_qni;
_Field_size_(m_kte) R32* __restrict m_qnc;
_Field_size_(m_kte) R32* __restrict m_p;
_Field_size_(m_kte) R32* __restrict m_exner;
_Field_size_(m_kte) R32* __restrict m_dfq;
_Field_size_(m_kte) R32* __restrict m_dz;
_Field_size_(m_kte) R32* __restrict m_tsq;
_Field_size_(m_kte) R32* __restrict m_qsq;
_Field_size_(m_kte) R32* __restrict m_cov;
_Field_size_(m_kte) R32* __restrict m_tcd;
_Field_size_(m_kte) R32* __restrict m_qcd;
_Field_size_(m_kte) R32* __restrict m_cldfra_bl1D;
// Input/Output arrays 1D!
_Field_size_(m_kte) R32* __restrict m_thl;
_Field_size_(m_kte) R32* __restrict m_sqw;
_Field_size_(m_kte) R32* __restrict m_sqv;
_Field_size_(m_kte) R32* __restrict m_sqc;
_Field_size_(m_kte) R32* __restrict m_sqi;
_Field_size_(m_kte) R32* __restrict m_dfm;
_Field_size_(m_kte) R32* __restrict m_dfh;
_Field_size_(m_kte) R32* __restrict m_du;
_Field_size_(m_kte) R32* __restrict m_dv;
_Field_size_(m_kte) R32* __restrict m_dth;
_Field_size_(m_kte) R32* __restrict m_dqv;
_Field_size_(m_kte) R32* __restrict m_dqc;
_Field_size_(m_kte) R32* __restrict m_dqi;
_Field_size_(m_kte) R32* __restrict m_dqni;
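// NOTE: the special member functions above iterate over the 40 pointer
// members via (&this->m_s_aw)[i], i.e. they treat the consecutively declared
// pointers as one contiguous array. m_totArrays must therefore always equal
// the exact number of pointer members declared between m_s_aw and m_dqni.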
static const int m_totArrays = 40;
};
}
}
#endif /*__MODULE_BL_MYNN_MYNN_TENDENCIES_IMPL_H__*/ |
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
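// Illustrative sketch (hypothetical call site): lookups go through the
// single-element cache, so repeated queries for the same FileID avoid
// touching the DenseMap; 'FID' would come from the SourceManager.
//
// \code
//   FileNullabilityMap NullabilityMap;
//   FileNullability &FN = NullabilityMap[FID]; // miss: pulled from Map
//   FN.SawTypeNullability = true;
//   NullabilityMap[FID];                       // hit: served from the cache
// \endcode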
// TODO SYCL Integration header approach relies on an assumption that kernel
// lambda objects created by the host compiler and any of the device compilers
// will be identical with respect to field types, order and offsets. Some verification
// mechanism should be developed to enforce that.
// TODO FIXME SYCL Support for SYCL in FE should be refactored:
// - kernel identification and generation should be made a separate pass over
// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl +
// FunctionTemplateDecl::getSpecializations() mechanism could be used for that.
// - All SYCL stuff on Sema level should be encapsulated into a single Sema
// field
// - Move SYCL stuff into a separate header
// Represents contents of a SYCL integration header file produced by a SYCL
// device compiler and used by SYCL host compiler (via forced inclusion into
// compiled SYCL source):
// - SYCL kernel names
// - SYCL kernel parameters and offsets of corresponding actual arguments
class SYCLIntegrationHeader {
public:
// Kind of kernel's parameters as captured by the compiler in the
// kernel lambda or function object
enum kernel_param_kind_t {
kind_first,
kind_accessor = kind_first,
kind_std_layout,
kind_sampler,
kind_pointer,
kind_last = kind_pointer
};
public:
SYCLIntegrationHeader(DiagnosticsEngine &Diag, bool UnnamedLambdaSupport,
Sema &S);
/// Emits contents of the header into given stream.
void emit(raw_ostream &Out);
/// Emits contents of the header into a file with given name.
/// Returns true/false on success/failure.
bool emit(const StringRef &MainSrc);
/// Signals that subsequent parameter descriptor additions will go to
/// the kernel with given name. Starts new kernel invocation descriptor.
void startKernel(StringRef KernelName, QualType KernelNameType,
StringRef KernelStableName, SourceLocation Loc);
/// Adds a kernel parameter descriptor to current kernel invocation
/// descriptor.
void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset);
/// Signals that addition of parameter descriptors to current kernel
/// invocation descriptor has finished.
void endKernel();
/// Registers a specialization constant to emit info for it into the header.
void addSpecConstant(StringRef IDName, QualType IDType);
private:
// Kernel actual parameter descriptor.
struct KernelParamDesc {
// Represents a parameter kind.
kernel_param_kind_t Kind = kind_last;
// If Kind is kind_scalar or kind_struct, then
// denotes parameter size in bytes (includes padding for structs)
// If Kind is kind_accessor
// denotes access target; possible access targets are defined in
// access/access.hpp
int Info = 0;
// Offset of the captured parameter value in the lambda or function object.
unsigned Offset = 0;
KernelParamDesc() = default;
};
// Kernel invocation descriptor
struct KernelDesc {
/// Kernel name.
std::string Name;
/// Kernel name type.
QualType NameType;
/// Kernel name with stable lambda name mangling
std::string StableName;
SourceLocation KernelLocation;
/// Descriptor of kernel actual parameters.
SmallVector<KernelParamDesc, 8> Params;
KernelDesc() = default;
};
/// Returns the latest invocation descriptor started by
/// SYCLIntegrationHeader::startKernel
KernelDesc *getCurKernelDesc() {
return KernelDescs.size() > 0 ? &KernelDescs[KernelDescs.size() - 1]
: nullptr;
}
/// Emits a forward declaration for given declaration.
void emitFwdDecl(raw_ostream &O, const Decl *D,
SourceLocation KernelLocation);
/// Emits forward declarations of classes and template classes on which
/// declaration of given type depends. See example in the comments for the
/// implementation.
/// \param O
/// stream to emit to
/// \param T
/// type to emit forward declarations for
/// \param KernelLocation
/// source location of the SYCL kernel function, used to emit nicer
/// diagnostic messages if kernel name is missing
/// \param Emitted
/// a set of declarations for which forward declarations have already been emitted
void emitForwardClassDecls(raw_ostream &O, QualType T,
SourceLocation KernelLocation,
llvm::SmallPtrSetImpl<const void *> &Emitted);
private:
/// Keeps invocation descriptors for each kernel invocation started by
/// SYCLIntegrationHeader::startKernel
SmallVector<KernelDesc, 4> KernelDescs;
using SpecConstID = std::pair<QualType, std::string>;
/// Keeps specialization constants met in the translation unit. Maps spec
/// constant's ID type to generated unique name. Duplicates are removed at
/// integration header emission time.
llvm::SmallVector<SpecConstID, 4> SpecConsts;
/// Used for emitting diagnostics.
DiagnosticsEngine &Diag;
/// Whether header is generated with unnamed lambda support
bool UnnamedLambdaSupport;
Sema &S;
};
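// Illustrative sketch (hypothetical kernel data): the intended call protocol
// for recording one kernel, per the startKernel/addParamDesc/endKernel
// contract documented above; 'Diag', 'S', 'NameTy' and 'Loc' are assumed to
// exist at the call site.
//
// \code
//   SYCLIntegrationHeader H(Diag, /*UnnamedLambdaSupport=*/false, S);
//   H.startKernel("my_kernel", NameTy, "my_kernel_stable", Loc);
//   H.addParamDesc(SYCLIntegrationHeader::kind_std_layout, /*Info=*/4,
//                  /*Offset=*/0);
//   H.endKernel();
//   H.emit(llvm::outs());
// \endcode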
/// Keeps track of the expected type during expression parsing. The type is
/// tied to a particular token; all functions that update or consume the type
/// take the start location of the token they are looking at as a parameter.
/// This makes it possible to avoid updating the type on hot paths in the
/// parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
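// Illustrative sketch (hypothetical parser state): the expected type is
// recorded when the parser sees a variable initializer and queried back at
// the token where completion runs; 'Tok' and 'D' are assumed to exist.
//
// \code
//   PreferredTypeBuilder PreferredType;
//   PreferredType.enterVariableInit(Tok.getLocation(), D);
//   // ... later, at the same token ...
//   QualType Expected = PreferredType.get(Tok.getLocation());
// \endcode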
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// A key method to reduce duplicate debug info from Sema.
virtual void anchor();
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See the comment in the overload below for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we must to since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
// If we found the label, pop from there.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method() {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
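// Illustrative sketch (hypothetical values): how a '#pragma pack'-style
// handler might drive a PragmaStack with the PragmaMsStackAction flags
// defined above; 'PragmaLoc' is assumed to exist.
//
// \code
//   PragmaStack<unsigned> Pack(/*Default=*/0);
//   Pack.Act(PragmaLoc, PSK_Push_Set, "four", 4u); // #pragma pack(push, four, 4)
//   Pack.Act(PragmaLoc, PSK_Pop, "four", 0u);      // #pragma pack(pop, four)
//   unsigned Cur = Pack.CurrentValue;              // back to the default, 0
// \endcode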
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
FPOptionsOverride result;
if (!FpPragmaStack.hasValue()) {
result = FPOptionsOverride();
} else {
result = FpPragmaStack.CurrentValue;
}
return result;
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members and the locations of delete-expressions
/// for which it could not be proven whether they mismatch the new-expression
/// used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
/// All the external declarations encountered and used in the TU.
SmallVector<VarDecl *, 4> ExternalDeclarations;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
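// Illustrative sketch (hypothetical call site): the intended pairing of
// push() with popWithoutEmitting(), letting the caller decide later whether
// the diagnostics collected in the pool should actually be emitted.
//
// \code
//   sema::DelayedDiagnosticPool Pool(S.DelayedDiagnostics.getCurrentPool());
//   auto State = S.DelayedDiagnostics.push(Pool);
//   // ... parse; access/deprecation diagnostics accumulate in Pool ...
//   S.DelayedDiagnostics.popWithoutEmitting(State);
// \endcode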
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
unsigned SavedFunctionScopesStart;
unsigned SavedInventedParameterInfosStart;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
SavedFunctionScopesStart(S.FunctionScopesStart),
SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
// Any saved FunctionScopes do not refer to this context.
S.FunctionScopesStart = S.FunctionScopes.size();
S.InventedParameterInfosStart = S.InventedParameterInfos.size();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
S.FunctionScopesStart = SavedFunctionScopesStart;
S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
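// Illustrative sketch: typical RAII use, temporarily entering the context of
// some declaration and restoring the previous one automatically; 'S' and
// 'DC' are hypothetical.
//
// \code
//   {
//     Sema::ContextRAII SavedContext(S, DC);
//     // ... act as if the parser were inside DC ...
//   } // previous DeclContext, scopes and diagnostics state restored here
// \endcode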
/// Whether the AST is currently being rebuilt to correct immediate
/// invocations. Immediate invocation candidates and references to consteval
/// functions aren't tracked when this is set.
bool RebuildingImmediateInvocation = false;
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
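// Illustrative usage sketch (assumed; FD is a hypothetical FunctionDecl
// whose body is being synthesized):
//
//   SynthesizedFunctionScope Scope(S, FD);
//   Scope.addContextNote(UseLoc); // optional "while defining ..." note
//   // ... build the body and hand it to ActOnFinishFunctionBody ...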
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before they are declared. Rare. May alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before they are declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>.
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold the 'respondsToSelector:' selector.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
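// Informal mapping of evaluation contexts to source constructs, mirroring
// the enumerator documentation above:
//   sizeof(e)               -> Unevaluated
//   case <constant-expr>:   -> ConstantEvaluated
//   an ordinary statement   -> PotentiallyEvaluated
//   a default argument      -> PotentiallyEvaluatedIfUsed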
using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype = false;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// Expressions appearing as the LHS of a volatile assignment in this
/// context. We produce a warning for these when popping the context if
/// they are not discarded-value expressions nor unevaluated operands.
SmallVector<Expr*, 2> VolatileAssignmentLHSs;
/// Set of candidates for starting an immediate invocation.
llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;
/// Set of DeclRefExprs referencing a consteval function when used in a
/// context not already known to be immediately invoked.
llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal. Also return the extra mangling decl if any.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
std::tuple<MangleNumberingContext *, Decl *>
getCurrentMangleNumberContext(const DeclContext *DC);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// arguments.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
/// This is not a defaultable comparison operator.
None,
/// This is an operator== that should be implemented as a series of
/// subobject comparisons.
Equal,
/// This is an operator<=> that should be implemented as a series of
/// subobject comparisons.
ThreeWay,
/// This is an operator!= that should be implemented as a rewrite in terms
/// of a == comparison.
NotEqual,
/// This is an <, <=, >, or >= that should be implemented as a rewrite in
/// terms of a <=> comparison.
Relational,
};
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
OldOverrides = S.FpPragmaStack.CurrentValue;
}
~FPFeaturesStateRAII() {
S.CurFPFeatures = OldFPFeaturesState;
S.FpPragmaStack.CurrentValue = OldOverrides;
}
FPOptionsOverride getOverrides() { return OldOverrides; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
FPOptionsOverride OldOverrides;
};
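// Illustrative usage sketch (assumed): save/restore around a compound
// statement whose pragmas may change the floating-point state.
//
//   {
//     FPFeaturesStateRAII SaveFP(S);
//     // ... process the compound statement ...
//   } // CurFPFeatures and FpPragmaStack.CurrentValue are restored here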
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
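// Illustrative usage sketch (assumed): wrap a deeply recursive step so the
// stack is grown instead of overflowing.
//
//   runWithSufficientStackSpace(Loc, [&] {
//     // ... recursive work, e.g. a template instantiation step ...
//   });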
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
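// Illustrative usage sketch: arguments are streamed into the builder and
// the diagnostic is emitted when it goes out of scope. The ID below is a
// real Sema diagnostic, used here purely as an example.
//
//   Diag(Loc, diag::err_typecheck_decl_incomplete_type) << T;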
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
SYCLIntelFPGAIVDepAttr *
BuildSYCLIntelFPGAIVDepAttr(const AttributeCommonInfo &CI, Expr *Expr1,
Expr *Expr2);
template <typename FPGALoopAttrT>
FPGALoopAttrT *BuildSYCLIntelFPGALoopAttr(const AttributeCommonInfo &A,
Expr *E = nullptr);
LoopUnrollHintAttr *BuildLoopUnrollHintAttr(const AttributeCommonInfo &A,
Expr *E);
OpenCLUnrollHintAttr *
BuildOpenCLLoopUnrollHintAttr(const AttributeCommonInfo &A, Expr *E);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
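// Note: "sizeless" built-in types are those, such as the ARM SVE vector
// types (e.g. svint8_t), whose size is not known at compile time.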
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether their
/// address is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible in the current context.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading a non-modular PCH file, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
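// Illustrative call sketch (diag::err_example_incomplete is hypothetical):
//
//   RequireCompleteType(Loc, T, diag::err_example_incomplete, SomeRange);
//
// The extra arguments are streamed before the type, so they fill %0..%N-1
// and the type fills %N in the diagnostic's format string.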
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as an overload set, and an expression
/// representing that overload set has been formed.
/// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
/// expression referencing the overload set.
NC_OverloadSet,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification OverloadSet(ExprResult E) {
NameClassification Result(NC_OverloadSet);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_OverloadSet);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
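// Illustrative parser-side sketch (assumed): dispatch on the classification.
//
//   Sema::NameClassification NC =
//       S.ClassifyName(CurScope, SS, Name, NameLoc, NextTok);
//   switch (NC.getKind()) {
//   case Sema::NC_Type:    /* consume NC.getType() as a type */      break;
//   case Sema::NC_NonType: /* S.ActOnNameClassifiedAsNonType(...) */ break;
//   default: break;
//   }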
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose when the parameters or return value of a function or
/// Objective-C method definition are passed by value and are larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
return SpecialMember + (unsigned)Comparison;
}
};
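// Illustrative sketch of how the classification is typically consumed
// (handleComparison and handleSpecialMember are hypothetical helpers):
//
//   DefaultedFunctionKind DFK = getDefaultedFunctionKind(FD);
//   if (DFK.isComparison())
//     handleComparison(DFK.asComparison());
//   else if (DFK.isSpecialMember())
//     handleSpecialMember(DFK.asSpecialMember());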
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, we actually parse the body and reject or error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a particular
/// DeclContext. Causes lookup within the scope to chain through enclosing contexts
/// in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
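// Worked example of the priority arithmetic described above (illustrative):
// an attribute inferred from another platform while applied via
// '#pragma clang attribute' accumulates
//   AP_PragmaClangAttribute + AP_InferredFromOtherPlatform == 1 + 2 == 3,
// so it loses to an explicitly written attribute, whose priority is
// AP_Explicit == 0.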
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
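// Illustrative declarations for each result kind (not from the original
// header):
//
//   void f(int);
//   void f(double);  // Ovl_Overload: same name, different signature
//   void f(int);     // Ovl_Match: signature matches an existing declaration
//   int  g;
//   void g(int);     // Ovl_NonFunction: lookup for 'g' finds a non-function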
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
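// Illustrative example (not from the original header):
//
//   struct S {
//     explicit S(int);            // explicit constructor
//     explicit operator bool();   // explicit conversion function
//   };
//   AllowedExplicit::None        -> neither may be used implicitly.
//   AllowedExplicit::Conversions -> only 'operator bool' may be used.
//   AllowedExplicit::All         -> both may be used.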
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
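// A minimal sketch of a concrete diagnoser (the diagnostic ID is
// hypothetical; the remaining pure virtual hooks inherited from
// ContextualImplicitConverter must be overridden the same way):
//
//   struct ArraySizeDiagnoser : ICEConvertDiagnoser {
//     ArraySizeDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
//                                          QualType T) override {
//       return S.Diag(Loc, diag::err_array_size_not_integral) << T;
//     }
//     // ... diagnoseIncomplete, diagnoseExplicitConv, etc. omitted ...
//   };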
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all the template and non-template functions
// identified by the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupNameKind enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
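// A minimal unqualified-lookup sketch, assuming it runs inside a Sema
// member function with Name, NameLoc, and the current Scope *S in scope:
//
//   LookupResult R(*this, Name, NameLoc, LookupOrdinaryName);
//   if (LookupName(R, S) && R.isSingleResult()) {
//     NamedDecl *D = R.getFoundDecl();
//     // ... use D ...
//   }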
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
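// Illustrative user-defined literal operator declarations matching the
// result kinds above (not from the original header):
//
//   unsigned operator""_c(unsigned long long);  // found as LOLR_Cooked
//   unsigned operator""_r(const char *);        // found as LOLR_Raw
//   template <char...>
//   unsigned operator""_t();                    // found as LOLR_Template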
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupBuiltin(LookupResult &R);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
/// Status of a function's emission with respect to CUDA/HIP/OpenMP
/// host/device attributes.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
// Whether the callee should be ignored in the CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError,     // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult CorrectDelayedTyposInExpr(
ExprResult ER, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid()
? ER
: CorrectDelayedTyposInExpr(ER.get(), InitDecl,
RecoverUncorrectedTypos, Filter);
}
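// A usage sketch (illustrative): correct typos in E, but accept a rebuilt
// expression only if it is not type-dependent.
//
//   ExprResult Res = CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false,
//       [](Expr *Candidate) -> ExprResult {
//         return Candidate->isTypeDependent() ? ExprError() : Candidate;
//       });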
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation's declaration exactly matches that of its interface
/// declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) that an atomic property must have either both or neither of a
/// user-declared setter and getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks whether methods implemented in
/// a category match those implemented in its primary class, and warns each
/// time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for a
/// given selector. It checks the desired kind first; if none is found and
/// the parameter CheckTheOther is set, it then checks the other kind. If no
/// such method or only one method is found, the function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
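/// A minimal sketch (hypothetical message-send check, with 'S' a Sema&):
/// resolve a selector sent to 'id', falling back from instance to factory
/// methods.
/// \code
///   ObjCMethodDecl *M = S.LookupInstanceMethodInGlobalPool(
///       Sel, ReceiverRange, /*receiverIdOrClass=*/true);
///   if (!M)
///     M = S.LookupFactoryMethodInGlobalPool(Sel, ReceiverRange);
/// \endcode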
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
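/// A minimal sketch (hypothetical parser callback): wrap the third clause of
/// a for-statement as a discarded-value full-expression before building the
/// loop.
/// \code
///   FullExprArg Third = S.MakeFullDiscardedValueExpr(IncExpr);
///   StmtResult Res = S.ActOnForStmt(ForLoc, LParenLoc, InitStmt, CondResult,
///                                   Third, RParenLoc, Body);
/// \endcode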
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
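/// A minimal sketch: the RAII object pairs ActOnStartOfCompoundStmt with
/// ActOnFinishOfCompoundStmt across all exit paths.
/// \code
///   {
///     Sema::CompoundScopeRAII CompoundScope(S);
///     // ... act on the statements of the compound statement ...
///   } // ActOnFinishOfCompoundStmt runs here
/// \endcode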
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
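/// A minimal sketch: pop the pushed function scope on early error returns,
/// but keep it when the body is built successfully.
/// \code
///   Sema::FunctionScopeRAII PopScopeOnError(S);
///   if (BodyIsInvalid)          // hypothetical error condition
///     return StmtError();       // scope popped by the RAII destructor
///   PopScopeOnError.disable();  // success path keeps the scope
/// \endcode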
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
bool CheckRebuiltAttributedStmtAttributes(ArrayRef<const Attr *> Attrs);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
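/// A minimal sketch: query copy-elision eligibility under the "as if by
/// std::move" rules when building a return statement.
/// \code
///   if (S.isCopyElisionCandidate(ReturnType, VD, Sema::CES_AsIfByStdMove)) {
///     // hypothetical: attempt overload resolution treating VD as an rvalue
///   }
/// \endcode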
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
ParsingClassDepth++;
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
ParsingClassDepth--;
DelayedDiagnostics.popUndelayed(state);
}
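/// A minimal sketch: bracket the parsing of a class definition so that
/// delayed diagnostics are suspended for its duration.
/// \code
///   Sema::ParsingClassState State = S.PushParsingClass();
///   // ... parse the member-specification ...
///   S.PopParsingClass(State);
/// \endcode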
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
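/// A minimal sketch: probe whether \p Var can be captured here without
/// performing the capture or emitting diagnostics.
/// \code
///   QualType CaptureType, DeclRefType;
///   bool Invalid = S.tryCaptureVariable(
///       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
///       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
///       /*FunctionScopeIndexToStopAt=*/nullptr);
/// \endcode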
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
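/// A minimal sketch (the diagnostic ID is hypothetical): emit a runtime
/// diagnostic that is suppressed if the statement proves unreachable once
/// the enclosing function body has been parsed.
/// \code
///   S.DiagRuntimeBehavior(E->getExprLoc(), E,
///                         S.PDiag(diag::warn_runtime_behavior_example)
///                             << E->getSourceRange());
/// \endcode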
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
ParsedType Type;
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccessExpr to allow
// BuildMemberReferenceExpr to reinvoke ActOnMemberAccessExpr after changing
// the access operator from a '.' to a '->' (to see if that is the change
// needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expression extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
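/// A minimal sketch: the designator 'a.b[123]' is represented as three
/// components, two identifier components followed by a bracketed expression;
/// the locations below are hypothetical.
/// \code
///   Sema::OffsetOfComponent Comp;
///   Comp.isBrackets = false;                     // the '.b' component
///   Comp.U.IdentInfo = &Context.Idents.get("b");
///   Comp.LocStart = DotLoc;
///   Comp.LocEnd = MemberLoc;
/// \endcode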
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
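/// A minimal sketch: dispatch on the result of a Microsoft __if_exists check.
/// \code
///   switch (S.CheckMicrosoftIfExistsSymbol(CurScope, SS, TargetNameInfo)) {
///   case Sema::IER_Exists:       /* enter the dependent block */ break;
///   case Sema::IER_DoesNotExist: /* skip the block */            break;
///   case Sema::IER_Dependent:    /* defer to instantiation */    break;
///   case Sema::IER_Error:        /* abort */                     break;
///   }
/// \endcode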
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
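/// A minimal sketch: fetch std::strong_ordering on behalf of a defaulted
/// operator<=>.
/// \code
///   QualType Cat = S.CheckComparisonCategoryType(
///       ComparisonCategoryType::StrongOrdering, Loc,
///       Sema::ComparisonCategoryUsage::DefaultedOperator);
///   if (Cat.isNull())
///     return QualType(); // hypothetical error path
/// \endcode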
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
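// Illustrative sketch (not from upstream; variable names are hypothetical):
// how a caller might drive this collector when computing an implicit special
// member's exception specification.
//
//   ImplicitExceptionSpecification ExcSpec(SemaRef);
//   for (const CXXMethodDecl *Subobject : SubobjectSpecialMembers)
//     ExcSpec.CalledDecl(Loc, Subobject);          // widens ComputedEST
//   FunctionProtoType::ExceptionSpecInfo ESI = ExcSpec.getExceptionSpec();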
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the exception specification of a
/// static member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed-in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
/// Otherwise it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++17 fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
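// For reference (illustrative, not upstream text): the C++17 fold-expression
// forms these entry points handle, e.g.
//
//   template <typename... Ts> auto sum(Ts... ts) {
//     return (ts + ... + 0);   // binary right fold: pack on the left, init on the right
//   }
//
// For an empty pack, only '&&' (-> true), '||' (-> false), and ',' (-> void())
// have a defined value; BuildEmptyCXXFoldExpr produces that value.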
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
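// Illustrative use (an assumption about a typical caller, not a quote):
// allow 'this' while checking a member declarator outside any function body,
// e.g. a trailing exception specification on a const member function:
//
//   {
//     Sema::CXXThisScopeRAII ThisScope(SemaRef, RD,
//                                      Qualifiers::fromCVRMask(Qualifiers::Const));
//     // ... process code that may name 'this' ...
//   } // prior CXXThisTypeOverride restored here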
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if an error occurred, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
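// Illustrative parameter mapping (an assumption, not lifted from the parser):
// for 'new (buf) Widget[n]{1, 2}':
//   UseGlobal     = false            // '::new' would make it true
//   PlacementArgs = { buf }
//   AllocType     = Widget           // ArraySize holds 'n'
//   Initializer   = the '{1, 2}' braced init list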
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
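// Illustrative scope selection (an assumption based on the enum above): a
// plain 'new T' searches AFS_Both for operator new, while '::new T' restricts
// the search to AFS_Global; AFS_Class would limit it to members of T.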
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the array type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
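// For reference (illustrative): the array type traits handled here are
// pseudo-functions such as
//   __array_rank(int[10][20])       // yields 2
//   __array_extent(int[10][20], 1)  // yields 20; DimExpr is the '1'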
/// ActOnExpressionTrait - Parsed one of the expression trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
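// For reference (illustrative): the pseudo-destructor forms handled above,
// where T names a scalar type so no real destructor exists:
//
//   template <typename T> void destroy(T *p) {
//     p->~T();        // pseudo-destructor call; the expression has type void
//   }
//   // also 'p->T::~T()' and, via the DeclSpec overload, 'p->~decltype(x)()'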
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
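// Illustrative construction (names are hypothetical): while parsing 'ns::x',
// the parser packages the 'ns' identifier and the '::' roughly as
//
//   NestedNameSpecInfo IdInfo(NsII, NsLoc, ColonColonLoc);
//
// and hands it to ActOnCXXNestedNameSpecifier below to extend SS.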
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery; in that case, no error messages are emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number the lambda for linkage purposes, if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
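// For reference (illustrative): the InitKind-to-DirectInit mapping above
// covers the three C++14 init-capture spellings:
//   [x = expr]  LambdaCaptureInitKind::CopyInit   -> DirectInit = false
//   [x(expr)]   LambdaCaptureInitKind::DirectInit -> DirectInit = true
//   [x{expr}]   LambdaCaptureInitKind::ListInit   -> DirectInit = true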
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++20.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; IR generation
/// actually generates the real body of the function pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result: true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// Emit a diagnostic if D1 was not at least as constrained as D2, but would
/// have been if a pair of atomic constraints involved had been declared in
/// a concept and not repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
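// Illustrative (grounded in the standard's conjunction rule, not upstream
// text): for 'template <std::integral T> requires (sizeof(T) > 1) void f();'
// ConstraintExprs would hold {std::integral<T>, (sizeof(T) > 1)}, and
// satisfaction is checked as if they were written 'E1 && E2'.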
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. If successful, returns false and updates Satisfaction with the
/// satisfaction verdict; if an error occurred and satisfaction could not be
/// determined, emits a diagnostic and returns true.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. If successful, returns false and updates Satisfaction
/// with the satisfaction verdict; if an error occurred and satisfaction could
/// not be determined, emits a diagnostic and returns true.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
StringRef Diagnostic);
void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
SourceLocation New);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point
/// later, when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annoated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// CheckBaseSpecifier - Check a base specifier parsed by ActOnBaseSpecifier.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required.
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
llvm::Optional<SourceLocation> TemplateKW;
};
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
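///
/// For illustration, \c Container names a deduction guide here:
///
/// \code
/// template<typename T> struct Container { Container(T); };
/// Container(int) -> Container<int>; // deduction-guide declaration
/// \endcode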
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g., for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
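///
/// For illustration, with the variable template below, the argument list
/// \c <int*> names the partial specialization whose value is \c true:
///
/// \code
/// template<typename T> constexpr bool is_pointer_v = false;
/// template<typename T> constexpr bool is_pointer_v<T*> = true;
/// \endcode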
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
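///
/// For illustration (a sketch): checking \c Pair<int> against the template
/// below succeeds, and \p Converted receives the argument list \c {int, int}
/// with the default argument for \c U substituted:
///
/// \code
/// template<typename T, typename U = T> struct Pair {};
/// Pair<int> P;
/// \endcode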
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
TypeSourceInfo **TSI,
bool DeducedTSTContext);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc,
bool DeducedTSTContext = true);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//===--------------------------------------------------------------------===//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
RequiresExprBodyDecl *
ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
Scope *BodyScope);
void ActOnFinishRequiresExpr();
concepts::Requirement *ActOnSimpleRequirement(Expr *E);
concepts::Requirement *ActOnTypeRequirement(
SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
concepts::Requirement *ActOnCompoundRequirement(Expr *E,
SourceLocation NoexceptLoc);
concepts::Requirement *
ActOnCompoundRequirement(
Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint, unsigned Depth);
concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);
concepts::ExprRequirement *
BuildExprRequirement(
Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::ExprRequirement *
BuildExprRequirement(
concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
bool IsSatisfied, SourceLocation NoexceptLoc,
concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
concepts::TypeRequirement *
BuildTypeRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
concepts::NestedRequirement *
BuildNestedRequirement(
concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
RequiresExprBodyDecl *Body,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
/// A requirement in a requires-expression.
UPPC_Requirement,
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
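///
/// For illustration, \c Ts is left unexpanded below and is diagnosed:
///
/// \code
/// template<typename ...Ts> void f(Ts t); // error: 'Ts' is not expanded
/// \endcode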
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
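///
/// For illustration (a sketch): expanding the pattern below requires \c Ts
/// and \c args to expand to the same number of elements; mismatched lengths
/// are the error case this function reports:
///
/// \code
/// template<typename ...Ts> void f(Ts ...ts);
/// template<typename ...Ts, typename ...Us>
/// void g(Us ...args) { f(static_cast<Ts>(args)...); }
/// \endcode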
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
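///
/// For illustration, if \c Arg has already been substituted with the pack
/// \c {int, float, double}, this returns 3 without materializing the
/// expansion.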
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
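/// For illustration, substituting \c int into the type \c auto* yields
/// \c int*, preserving the surrounding type structure.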
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None,
bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
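///
/// For illustration, each constructor of the template below gives rise to an
/// implicit guide, roughly \c Wrapper(T)->Wrapper<T>:
///
/// \code
/// template<typename T> struct Wrapper { Wrapper(T); };
/// Wrapper W(42); // deduces Wrapper<int> via the implicit guide
/// \endcode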
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(
FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
unsigned Depth, llvm::SmallBitVector &Used);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// We are checking the constraints associated with a constrained entity or
/// the constraint expression of a concept. This includes the checks that
/// atomic constraints have the type 'bool' and that they can be constant
/// evaluated.
ConstraintsCheck,
/// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
/// We are normalizing a constraint expression.
ConstraintNormalization,
/// We are substituting into the parameter mapping of an atomic constraint
/// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
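///
/// A typical use restores the previous index automatically, e.g. (with
/// placeholder names \c SemaRef and \c I):
///
/// \code
/// {
///   ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
///   // ... substitute the I-th element of each expanded pack ...
/// } // previous index restored here
/// \endcode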
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
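///
/// A typical use checks for failure before continuing, e.g.:
///
/// \code
/// InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
/// if (Inst.isInvalid())
///   return; // recursion limit reached; an error has been emitted
/// \endcode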
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintsCheck {};
/// \brief Note that we are checking the constraints associated with some
/// constrained entity (a concept declaration or a template with associated
/// constraints).
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintsCheck, NamedDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
struct ConstraintSubstitution {};
/// \brief Note that we are checking a constraint expression associated
/// with a template declaration or as part of the satisfaction check of a
/// concept.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintSubstitution, NamedDecl *Template,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange);
struct ConstraintNormalization {};
/// \brief Note that we are normalizing a constraint expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);
struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);
/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
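/// For illustration only, a hedged sketch of the usual RAII pattern for
/// this class inside Sema member functions (the entity and the surrounding
/// function are hypothetical):
/// \code
///   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
///   if (Inst.isInvalid())
///     return true; // depth limit exceeded; an error has been emitted
///   if (Inst.isAlreadyInstantiating())
///     return false; // this specialization is already on the stack
///   // ... perform the instantiation; the destructor pops the context.
/// \endcode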
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-null, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
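/// For illustration only, a hedged sketch of how callers typically consume
/// the result (the diagnostic handling shown is schematic, not the actual
/// implementation):
/// \code
///   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
///     // In a SFINAE context: suppress the diagnostic, recording it in the
///     // deduction context (*Info) when that pointer is non-null.
///   } else {
///     // Not in a SFINAE context: emit the diagnostic normally.
///   }
/// \endcode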
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
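/// For illustration only, a hedged sketch of trapping SFINAE errors while
/// speculatively checking a construct (buildSomethingSpeculatively is a
/// hypothetical helper):
/// \code
///   SFINAETrap Trap(*this);
///   ExprResult Res = buildSomethingSpeculatively();
///   if (Res.isInvalid() || Trap.hasErrorOccurred())
///     return ExprError(); // errors were trapped and suppressed, not emitted
///   return Res;
/// \endcode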
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
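/// For illustration only, a hedged sketch; provisional analysis is scoped
/// with RAII just like SFINAETrap (the analyzed construct is hypothetical):
/// \code
///   {
///     TentativeAnalysisScope Tentative(*this);
///     // ... analyze the construct; typo correction is disabled and
///     // immediate-context diagnostics are suppressed in here ...
///   } // the previous typo-correction state is restored here
/// \endcode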
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
} else {
// Template instantiations in the PCH may be delayed until the TU.
S.PendingInstantiations.swap(SavedPendingInstantiations);
S.PendingInstantiations.insert(S.PendingInstantiations.end(),
SavedPendingInstantiations.begin(),
SavedPendingInstantiations.end());
}
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
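/// For illustration only, a hedged sketch of bracketing work that may
/// enqueue vtables and implicit instantiations (the work itself is
/// hypothetical):
/// \code
///   {
///     GlobalEagerInstantiationScope GlobalInstantiations(*this, Enabled);
///     // ... work that may add to VTableUses and PendingInstantiations ...
///     GlobalInstantiations.perform(); // define vtables, flush the queue
///   } // the destructor restores the saved queues
/// \endcode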
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
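/// For illustration only, a hedged sketch; a local scope is typically
/// nested inside a GlobalEagerInstantiationScope around a function body:
/// \code
///   LocalEagerInstantiationScope LocalInstantiations(*this);
///   // ... instantiate a body; members of local classes are queued up ...
///   LocalInstantiations.perform(); // flush local-only instantiations
/// \endcode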
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
///
/// Indices must be set in strictly increasing order; any skipped indices
/// receive default-constructed infos.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
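/// For illustration only, a hedged sketch of the intended build-up protocol
/// (getInfoFor and EPI are hypothetical; indices are set in strictly
/// increasing order):
/// \code
///   ExtParameterInfoBuilder ParamInfos;
///   for (unsigned I = 0; I != Params.size(); ++I)
///     if (auto Info = getInfoFor(Params[I]))
///       ParamInfos.set(I, *Info); // skipped indices get default infos
///   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(Params.size());
/// \endcode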
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateArgumentListInfo &Outputs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
bool CheckInstantiatedFunctionTemplateConstraints(
SourceLocation PointOfInstantiation, FunctionDecl *Decl,
ArrayRef<TemplateArgument> TemplateArgs,
ConstraintSatisfaction &Satisfaction);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on '\#pragma clang __debug dump II'.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch.
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled?
bool isPreciseFPEnabled() {
return !CurFPFeatures.getAllowFPReassociate() &&
!CurFPFeatures.getNoSignedZero() &&
!CurFPFeatures.getAllowReciprocal() &&
!CurFPFeatures.getAllowApproxFunc();
}
/// ActOnPragmaFloatControl - Called on well-formed \#pragma float_control.
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called to set rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
template <typename AttrType>
bool checkRangedIntegralArgument(Expr *E, const AttrType *TmpAttr,
ExprResult &Result);
template <typename AttrType>
void AddOneConstantValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
template <typename AttrType>
void AddOneConstantPowerTwoValueAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGABankBitsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addSYCLIntelPipeIOAttr - Adds a pipe I/O attribute to a particular
/// declaration.
void addSYCLIntelPipeIOAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ID);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
bool checkAllowedSYCLInitializer(VarDecl *VD,
bool CheckValueDependent = false);
/// Adds an intel_reqd_sub_group_size attribute to a particular declaration.
void addIntelReqdSubGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() shall not be
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// names of those extensions.
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// names of those extensions.
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in the appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = std::string(Ext);
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
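/// For illustration only, a hedged sketch of the extension bookkeeping
/// above when a pragma enables an extension (DoubleTy is a hypothetical
/// QualType):
/// \code
///   setCurrentOpenCLExtension("cl_khr_fp64");
///   setCurrentOpenCLExtensionForType(DoubleTy); // type now requires it
///   setCurrentOpenCLExtension("");
///   // Later uses are diagnosed via checkOpenCLDisabledTypeDeclSpec()
///   // unless cl_khr_fp64 has been enabled.
/// \endcode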
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Locations of nested '#pragma omp declare target' directives.
SmallVector<SourceLocation, 4> DeclareTargetNesting;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled because the owning extension
/// is disabled, and emits diagnostic messages if so.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
/// The associated OpenMP context selector.
OMPTraitInfo *TI;
/// The associated OpenMP context selector mangling.
std::string NameSuffix;
OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope.
FunctionDecl *
ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
Declarator &D);
/// Register \p FD as specialization of \p BaseFD in the current `omp
/// begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
FunctionDecl *FD, FunctionDecl *BaseFD);
public:
/// Return true if we are inside an `omp begin/end declare variant` scope.
bool isInOpenMPDeclareVariantScope() {
return !OMPDeclareVariantScopes.empty();
}
/// Given the potential call expression \p Call, determine if a
/// specialization via the OpenMP declare variant mechanism is available.
/// If there is, return the specialized call expression; otherwise return
/// the original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
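/// For illustration only, a hedged sketch (Call, S, and the argument list
/// are whatever the caller already built for the plain call expression):
/// \code
///   ExprResult Res = ActOnOpenMPCall(Call, S, LParenLoc, Args, RParenLoc,
///                                    /*ExecConfig=*/nullptr);
///   // Res is either the original Call or a call to the declare-variant
///   // specialization selected by the current OpenMP context.
/// \endcode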
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in a 'private' clause.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by a 'target' directive.
/// \param Level Relative level of the nested OpenMP construct for which the
/// check is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of the nested OpenMP construct for which
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on a correct id-expression from '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the 'omp requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize the declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish the current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called on the start of a target region, i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a declare target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for the OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred function calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return !DeclareTargetNesting.empty();
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initializes the captured region for an OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
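// Usage sketch (hypothetical parser code; 'Actions', 'Clauses' and
// 'ParseStatement' are illustrative names):
//
//   Actions.ActOnOpenMPRegionStart(OMPD_parallel, CurScope);
//   StmtResult Body = ParseStatement();   // parse the associated statement
//   StmtResult Dir = Actions.ActOnOpenMPRegionEnd(Body, Clauses);
//
// RegionStart/RegionEnd bracket the associated statement and manage the
// captured region(s) counted by getOpenMPCaptureLevels().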
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
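// Illustrative directive handled by ActOnOpenMPDeclareSimdDirective:
//
//   #pragma omp declare simd simdlen(8) uniform(a) aligned(b : 64) \
//           linear(i : 1) notinbranch
//   float add(float *a, float *b, int i);
//
// 'Simdlen', 'Uniforms', 'Aligneds'/'Alignments' and 'Linears'/
// 'LinModifiers'/'Steps' carry the clause arguments; 'BS' records the
// notinbranch/inbranch state.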
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
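// Illustrative directive handled by the two 'declare variant' entry points
// above:
//
//   #pragma omp declare variant(base_gpu) match(device = {arch(nvptx64)})
//   void base(void);
//
// 'VariantRef' references 'base_gpu' and 'TI' describes the match clause.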
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
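// Illustrative clause for ActOnOpenMPIfClause: in
//
//   #pragma omp target parallel if(parallel : cond)
//
// 'NameModifier' would be OMPD_parallel, restricting the condition to the
// 'parallel' part of the combined directive; without a modifier it is
// OMPD_unknown.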
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
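// Illustrative argument mapping for ActOnOpenMPScheduleClause: for
//
//   schedule(monotonic, dynamic : 4)
//
// M1 would be the 'monotonic' modifier, Kind the 'dynamic' schedule kind,
// and ChunkSize the expression '4'; M2 covers a second, comma-separated
// modifier when present.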
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
SourceLocation ExtraModifierLoc,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
SourceLocation LPKindLoc, SourceLocation ColonLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation ModifierLoc, SourceLocation ColonLoc,
SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
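// Illustrative clause for ActOnOpenMPDependClause:
//
//   depend(in : a, b[0:n])
//
// 'DepKind' would be OMPC_DEPEND_in and 'VarList' would hold the locator
// expressions; 'DepModifier' carries an optional iterator modifier.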
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ModifierLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
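// Illustrative clause for ActOnOpenMPMapClause:
//
//   map(always, tofrom : x[0:n])
//
// 'MapTypeModifiers' would hold the 'always' modifier, 'MapType' the
// 'tofrom' kind, and 'VarList' the array-section expression.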
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
ArrayRef<SourceLocation> MotionModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Data for a list of allocators.
struct UsesAllocatorsData {
/// Allocator.
Expr *Allocator = nullptr;
/// Allocator traits.
Expr *AllocatorTraits = nullptr;
/// Locations of '(' and ')' symbols.
SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
ArrayRef<UsesAllocatorsData> Data);
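// Illustrative clause for ActOnOpenMPUsesAllocatorClause:
//
//   uses_allocators(omp_default_mem_alloc, my_alloc(my_traits))
//
// Each UsesAllocatorsData entry carries one allocator ('Allocator'), its
// optional traits expression ('AllocatorTraits'), and the parenthesis
// locations when traits are present.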
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc, Expr *Modifier,
ArrayRef<Expr *> Locators);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is specified by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
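// Minimal usage sketch (assuming a Sema instance 'S' and an expression 'E'):
//
//   ExprResult R = S.ImpCastExprToType(E, S.Context.IntTy, CK_IntegralCast);
//
// This inserts (or merges into) an ImplicitCastExpr so that the result has
// type 'int' and, with the default VK_RValue, is a non-lvalue.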
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used by DefaultVariadicArgumentPromotion for emitting the right warning.
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
/// An arithmetic operation.
ACK_Arithmetic,
/// A bitwise operation.
ACK_BitwiseOp,
/// A comparison.
ACK_Comparison,
/// A conditional (?:) operator.
ACK_Conditional,
/// A compound assignment expression.
ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatibleFunctionPointer - The assignment is between two function
/// pointer types that are not compatible, but we accept them as an
/// extension.
IncompatibleFunctionPointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
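// Illustrative C snippets and the enumerators they would map to, following
// the documentation above:
//
//   int *p = q;            // Compatible (q is an int *)
//   int *p = 42;           // IntToPointer, accepted as an extension
//   int n = p;             // PointerToInt, accepted as an extension
//   float *f = p;          // IncompatiblePointer
//   const char **c = cpp;  // IncompatibleNestedPointerQualifiers
//                          // (cpp is a char **)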
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking for binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS,
SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
/// The conversions that would be performed on an lvalue of type T2 when
/// binding a reference of type T1 to it, as determined when evaluating
/// whether T1 is reference-compatible with T2.
enum ReferenceConversions {
Qualification = 0x1,
NestedQualification = 0x2,
Function = 0x4,
DerivedToBase = 0x8,
ObjC = 0x10,
ObjCLifetime = 0x20,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
};
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
ReferenceConversions *Conv = nullptr);
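// Illustrative binding (assuming 'struct Derived : Base' and 'Derived d'):
//
//   const Base &r = d;
//
// CompareReferenceRelationship would report Ref_Compatible with the
// Qualification and DerivedToBase conversion bits set.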
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
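// Usage sketch (hypothetical caller): for 'if constexpr (Cond)' the
// condition is built with ConditionKind::ConstexprIf and, once it is not
// value-dependent, getKnownValue() yields the compile-time result:
//
//   ConditionResult CR = Actions.ActOnCondition(
//       S, Loc, CondExpr, Sema::ConditionKind::ConstexprIf);
//   if (llvm::Optional<bool> Known = CR.getKnownValue())
//     ; // fold to the branch selected by *Known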
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the checked and converted condition expression, or an invalid
/// result if there were any errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as a condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual SemaDiagnosticBuilder
diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) = 0;
virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
virtual ~VerifyICEDiagnoser() {}
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
/// Check if the declaration is allowed to be used in expressions for the
/// offloading devices.
void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
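// For example, when compiling for the device, a call from a __host__
// __device__ function to a __device__ function is CFP_SameSide, while a call
// from that same function to a __host__ function is CFP_WrongSide; overload
// resolution prefers candidates with the numerically larger preference.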
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller the function which needs the address of \p Callee;
/// nullptr in case of global context.
/// \param Callee the target function.
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
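///
/// Example usage (sketch):
///
/// if (!IsAllowedCUDACall(Caller, Callee))
/// return ExprError();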
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// A CUDA lambda is by default a host device function unless it has an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
/// Checks that initializers of \p Var satisfy CUDA restrictions. In case of
/// error, emits an appropriate diagnostic and invalidates \p Var.
///
/// CUDA allows only empty constructors as initializers for global variables
/// (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
/// __shared__ variables, whether or not they are local (they are all
/// implicitly static in CUDA). One exception is that CUDA allows constant
/// initializers for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure the kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. The returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
void CheckSYCLKernelCall(FunctionDecl *CallerFunc, SourceRange CallLoc,
ArrayRef<const Expr *> Args);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckIntelFPGARegBuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call);
bool CheckIntelFPGAMemBuiltinFunctionCall(CallExpr *Call);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
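// For example, a function declared with __attribute__((format(printf, 1, 2)))
// has GetFormatStringType(Format) == FST_Printf.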
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check whether a field shadows a field inherited from a base class.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC's.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
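///
/// Example (a sketch; the argument kind and magic value are illustrative):
///
/// // Associate magic value 1 of argument kind "mpi" with type 'int'.
/// RegisterTypeTagForDatatype(&Context.Idents.get("mpi"), /*MagicValue=*/1,
/// Context.IntTy, /*LayoutCompatible=*/false,
/// /*MustBeNull=*/false);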
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field,
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to a
/// function exceed the number of parameters expected for it.
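///
/// Example: TooManyArguments(/*NumParams=*/2, /*NumArgs=*/3) is true; so is
/// TooManyArguments(2, 2, /*PartialOverloading=*/true), since during
/// code completion the argument list is still open and one more argument is
/// assumed to follow the comma.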
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics, such as in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
private:
// We store SYCL kernels here and handle them separately -- which is a hack.
// FIXME: It would be best to refactor this.
llvm::SetVector<Decl *> SyclDeviceDecls;
// SYCL integration header instance for current compilation unit this Sema
// is associated with.
std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader;
// Used to suppress diagnostics during kernel construction, since these were
// already emitted earlier. Diagnosing during kernel emission also skips the
// useful notes that show where the kernel was called.
bool DiagnosingSYCLKernel = false;
public:
void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.insert(d); }
llvm::SetVector<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; }
/// Lazily creates and returns SYCL integration header instance.
SYCLIntegrationHeader &getSyclIntegrationHeader() {
if (SyclIntHeader == nullptr)
SyclIntHeader = std::make_unique<SYCLIntegrationHeader>(
getDiagnostics(), getLangOpts().SYCLUnnamedLambda, *this);
return *SyclIntHeader.get();
}
enum SYCLRestrictKind {
KernelGlobalVariable,
KernelRTTI,
KernelNonConstStaticDataVariable,
KernelCallVirtualFunction,
KernelUseExceptions,
KernelCallRecursiveFunction,
KernelCallFunctionPointer,
KernelAllocateStorage,
KernelUseAssembly,
KernelCallDllimportFunction,
KernelCallVariadicFunction,
KernelCallUndefinedFunction,
KernelConstStaticVariable
};
bool isKnownGoodSYCLDecl(const Decl *D);
void checkSYCLDeviceVarDecl(VarDecl *Var);
void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC);
void MarkDevice();
void MarkSyclSimd();
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't yet know that this function will be
/// codegen'ed for the device, creates a diagnostic which is emitted if and
/// when we realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Finishes analysis of the deferred function calls that may not be
/// properly declared for device compilation.
void finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Tells whether the given variable is a SYCL explicit SIMD extension's
/// "private global" variable, i.e. a global variable in the private address
/// space.
bool isSYCLEsimdPrivateGlobal(VarDecl *VDecl) {
return getLangOpts().SYCLIsDevice && getLangOpts().SYCLExplicitSIMD &&
VDecl->hasGlobalStorage() &&
(VDecl->getType().getAddressSpace() == LangAS::opencl_private);
}
};
template <typename AttrType>
void Sema::AddOneConstantValueAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E) {
AttrType TmpAttr(Context, CI, E);
if (!E->isValueDependent()) {
ExprResult ICE;
if (checkRangedIntegralArgument<AttrType>(E, &TmpAttr, ICE))
return;
E = ICE.get();
}
if (IntelFPGAPrivateCopiesAttr::classof(&TmpAttr)) {
if (!D->hasAttr<IntelFPGAMemoryAttr>())
D->addAttr(IntelFPGAMemoryAttr::CreateImplicit(
Context, IntelFPGAMemoryAttr::Default));
}
D->addAttr(::new (Context) AttrType(Context, CI, E));
}
template <typename AttrType>
void Sema::AddOneConstantPowerTwoValueAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E) {
AttrType TmpAttr(Context, CI, E);
if (!E->isValueDependent()) {
ExprResult ICE;
if (checkRangedIntegralArgument<AttrType>(E, &TmpAttr, ICE))
return;
Expr::EvalResult Result;
E->EvaluateAsInt(Result, Context);
llvm::APSInt Value = Result.Val.getInt();
if (!Value.isPowerOf2()) {
Diag(CI.getLoc(), diag::err_attribute_argument_not_power_of_two)
<< &TmpAttr;
return;
}
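// A numbanks() value must agree with any explicit bankbits() list: N banks
// are addressed by ceil(log2(N)) bank bits, so e.g. numbanks(8) requires
// exactly three bank bits.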
if (IntelFPGANumBanksAttr::classof(&TmpAttr)) {
if (auto *BBA = D->getAttr<IntelFPGABankBitsAttr>()) {
unsigned NumBankBits = BBA->args_size();
if (NumBankBits != Value.ceilLogBase2()) {
Diag(TmpAttr.getLocation(), diag::err_bankbits_numbanks_conflicting);
return;
}
}
}
E = ICE.get();
}
if (!D->hasAttr<IntelFPGAMemoryAttr>())
D->addAttr(IntelFPGAMemoryAttr::CreateImplicit(
Context, IntelFPGAMemoryAttr::Default));
// We are adding a user NumBanks, drop any implicit default.
if (IntelFPGANumBanksAttr::classof(&TmpAttr)) {
if (auto *NBA = D->getAttr<IntelFPGANumBanksAttr>())
if (NBA->isImplicit())
D->dropAttr<IntelFPGANumBanksAttr>();
}
D->addAttr(::new (Context) AttrType(Context, CI, E));
}
template <typename FPGALoopAttrT>
FPGALoopAttrT *Sema::BuildSYCLIntelFPGALoopAttr(const AttributeCommonInfo &A,
Expr *E) {
if (!E && !(A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGALoopCoalesce))
return nullptr;
if (E && !E->isInstantiationDependent()) {
Optional<llvm::APSInt> ArgVal = E->getIntegerConstantExpr(getASTContext());
if (!ArgVal) {
Diag(E->getExprLoc(), diag::err_attribute_argument_type)
<< A.getAttrName() << AANT_ArgumentIntegerConstant
<< E->getSourceRange();
return nullptr;
}
int Val = ArgVal->getSExtValue();
if (A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGAII ||
A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGALoopCoalesce) {
if (Val <= 0) {
Diag(E->getExprLoc(), diag::err_attribute_requires_positive_integer)
<< A.getAttrName() << /* positive */ 0;
return nullptr;
}
} else if (A.getParsedKind() ==
ParsedAttr::AT_SYCLIntelFPGAMaxConcurrency ||
A.getParsedKind() ==
ParsedAttr::AT_SYCLIntelFPGAMaxInterleaving ||
A.getParsedKind() ==
ParsedAttr::AT_SYCLIntelFPGASpeculatedIterations) {
if (Val < 0) {
Diag(E->getExprLoc(), diag::err_attribute_requires_positive_integer)
<< A.getAttrName() << /* non-negative */ 1;
return nullptr;
}
} else {
llvm_unreachable("unknown sycl fpga loop attr");
}
}
return new (Context) FPGALoopAttrT(Context, A, E);
}
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
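// A minimal usage sketch: the RAII object pushes an evaluation context on
// construction and pops it on destruction.
//
// {
// EnterExpressionEvaluationContext Unevaluated(
// Actions, Sema::ExpressionEvaluationContext::Unevaluated);
// /* ...parse an unevaluated operand... */
// }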
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
CPhotoconsistencyOdometryCeres.h | /*
* Photoconsistency-Visual-Odometry
* Multiscale Photoconsistency Visual Odometry from RGBD Images
* Copyright (c) 2012, Miguel Algaba Borrego
*
* http://code.google.com/p/photoconsistency-visual-odometry/
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the holder(s) nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "phovo/include/config.h"
//#ifdef PHOVO_WITH_CERES // Check for Ceres-solver
#ifndef _CPHOTOCONSISTENCY_ODOMETRY_CERES_
#define _CPHOTOCONSISTENCY_ODOMETRY_CERES_
#define ENABLE_GAUSSIAN_BLUR 1
#define ENABLE_BOX_FILTER_BLUR 0
#define ENABLE_OPENMP_MULTITHREADING_CERES 0 // Enables OpenMP for CPhotoconsistencyOdometryCeres
#include "CPhotoconsistencyOdometry.h"
#include "third_party/sample.h"
#include "ceres/ceres.h"
#include "opencv2/highgui/highgui.hpp" //visualize iterations
namespace PhotoconsistencyOdometry
{
namespace Ceres
{
using ceres::AutoDiffCostFunction;
using ceres::CostFunction;
using ceres::Problem;
using ceres::Solver;
using ceres::Solve;
using namespace std;
std::vector<cv::Mat> gray0Pyr,gray1Pyr,depth0Pyr,gray1GradXPyr,gray1GradYPyr;
double cameraMatrix[3][3];
int optimizationLevel;
int numOptimizationLevels;
std::vector<int> blurFilterSize;
std::vector<float> imageGradientsScalingFactor;
std::vector<int> max_num_iterations;
std::vector<float> function_tolerance;
std::vector<float> gradient_tolerance;
std::vector<float> parameter_tolerance;
std::vector<float> initial_trust_region_radius;
std::vector<float> max_trust_region_radius;
std::vector<float> min_trust_region_radius;
std::vector<float> min_relative_decrease;
int num_linear_solver_threads;
int num_threads;
bool minimizer_progress_to_stdout;
bool visualizeIterations;
double x[6]; //Parameter vector (x y z yaw pitch roll)
/*!This class computes the rigid (6DoF) transformation that best aligns a pair of RGBD frames using a photoconsistency maximization approach.
To estimate the rigid transformation, this class implements a coarse-to-fine approach: the algorithm starts by finding a first pose approximation at
a low resolution level and uses that estimate to initialize the optimization at larger image scales. This class uses Ceres autodifferentiation to compute the derivatives of the cost function.*/
class CPhotoconsistencyOdometryCeres : public CPhotoconsistencyOdometry
{
private:
class ResidualRGBDPhotoconsistency {
public:
template <typename T> bool operator()(const T* const stateVector,
T* residuals) const {
//Set camera parameters depending on the optimization level
T fx = T(cameraMatrix[0][0])/pow(2,T(optimizationLevel));
T fy = T(cameraMatrix[1][1])/pow(2,T(optimizationLevel));
T inv_fx = T(1)/fx;
T inv_fy = T(1)/fy;
T ox = T(cameraMatrix[0][2])/pow(2,T(optimizationLevel));
T oy = T(cameraMatrix[1][2])/pow(2,T(optimizationLevel));
//Compute the rigid transformation matrix from the parameters
T x = stateVector[0];
T y = stateVector[1];
T z = stateVector[2];
T yaw = stateVector[3];
T pitch = stateVector[4];
T roll = stateVector[5];
T Rt[4][4];
T sin_yaw = sin(yaw);
T cos_yaw = cos(yaw);
T sin_pitch = sin(pitch);
T cos_pitch = cos(pitch);
T sin_roll = sin(roll);
T cos_roll = cos(roll);
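//The rotation below is composed as R = Rz(yaw) * Ry(pitch) * Rx(roll)
//(ZYX Euler convention), with the translation (x,y,z) in the last column.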
Rt[0][0] = cos_yaw * cos_pitch;
Rt[0][1] = cos_yaw * sin_pitch * sin_roll - sin_yaw * cos_roll;
Rt[0][2] = cos_yaw * sin_pitch * cos_roll + sin_yaw * sin_roll;
Rt[0][3] = x;
Rt[1][0] = sin_yaw * cos_pitch;
Rt[1][1] = sin_yaw * sin_pitch * sin_roll + cos_yaw * cos_roll;
Rt[1][2] = sin_yaw * sin_pitch * cos_roll - cos_yaw * sin_roll;
Rt[1][3] = y;
Rt[2][0] = -sin_pitch;
Rt[2][1] = cos_pitch * sin_roll;
Rt[2][2] = cos_pitch * cos_roll;
Rt[2][3] = z;
Rt[3][0] = T(0);
Rt[3][1] = T(0);
Rt[3][2] = T(0);
Rt[3][3] = T(1);
//Initialize the error function (residuals) with an initial value
#if ENABLE_OPENMP_MULTITHREADING_CERES
#pragma omp parallel for
#endif
for(int r=0;r<gray0Pyr[optimizationLevel].rows;r++)
{
for(int c=0;c<gray0Pyr[optimizationLevel].cols;c++)
{
residuals[gray0Pyr[optimizationLevel].cols*r+c]=T(0);
}
}
T residualScalingFactor = T(1);
#if ENABLE_OPENMP_MULTITHREADING_CERES
#pragma omp parallel for
#endif
for(int r=0;r<gray0Pyr[optimizationLevel].rows;r++)
{
T point3D[4];
T transformedPoint3D[4];
T transformed_r,transformed_c; // 2D coordinates of the transformed pixel(r,c) of frame 1
T pixel1; //Intensity value of the pixel(r,c) of the warped frame 1
T pixel2; //Intensity value of the pixel(r,c) of frame 2
for(int c=0;c<gray0Pyr[optimizationLevel].cols;c++)
{
if(depth0Pyr[optimizationLevel].at<float>(r,c)>0) //If the pixel has a valid depth value
{
//Compute the local 3D coordinates of pixel(r,c) of frame 1
point3D[2] = T(depth0Pyr[optimizationLevel].at<float>(r,c)); //z
point3D[0] = (T(c)-ox) * point3D[2] * inv_fx; //x
point3D[1] = (T(r)-oy) * point3D[2] * inv_fy; //y
point3D[3] = T(1.0); //homogeneous coordinate
//Transform the 3D point using the transformation matrix Rt
transformedPoint3D[0] = Rt[0][0]*point3D[0]+Rt[0][1]*point3D[1]+Rt[0][2]*point3D[2]+Rt[0][3]*point3D[3];
transformedPoint3D[1] = Rt[1][0]*point3D[0]+Rt[1][1]*point3D[1]+Rt[1][2]*point3D[2]+Rt[1][3]*point3D[3];
transformedPoint3D[2] = Rt[2][0]*point3D[0]+Rt[2][1]*point3D[1]+Rt[2][2]*point3D[2]+Rt[2][3]*point3D[3];
transformedPoint3D[3] = Rt[3][0]*point3D[0]+Rt[3][1]*point3D[1]+Rt[3][2]*point3D[2]+Rt[3][3]*point3D[3];
//Project the 3D point to the 2D plane
transformed_c = ((transformedPoint3D[0] * fx) / transformedPoint3D[2]) + ox; //transformed x (2D)
transformed_r = ((transformedPoint3D[1] * fy) / transformedPoint3D[2]) + oy; //transformed y (2D)
//Assign the intensity value to the warped image and compute the difference between the transformed
//pixel of frame 1 and the corresponding pixel of frame 2, i.e. the error function
if(transformed_r>=T(0) && transformed_r < T(gray0Pyr[optimizationLevel].rows) &&
transformed_c>=T(0) && transformed_c < T(gray0Pyr[optimizationLevel].cols))
{
//Compute the projected coordinates of the transformed 3D point
int transformed_r_scalar = static_cast<int>(ceres::JetOps<T>::GetScalar(transformed_r));
int transformed_c_scalar = static_cast<int>(ceres::JetOps<T>::GetScalar(transformed_c));
//Compute the pixel residual
pixel1 = T(gray0Pyr[optimizationLevel].at<float>(r,c));
pixel2 = SampleWithDerivative(gray1Pyr[optimizationLevel],
gray1GradXPyr[optimizationLevel],
gray1GradYPyr[optimizationLevel],transformed_c,transformed_r);
residuals[gray0Pyr[optimizationLevel].cols*transformed_r_scalar+transformed_c_scalar] = residualScalingFactor * (pixel1 - pixel2);
}
}
}
}
return true;
}
};
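/*A minimal sketch of how this functor would be attached to a Ceres problem
(assuming the standard Ceres API); the residual count is dynamic because it
equals rows*cols of the current pyramid level:
ceres::Problem problem;
int numResiduals = gray0Pyr[optimizationLevel].rows *
gray0Pyr[optimizationLevel].cols;
CostFunction* costFunction =
new AutoDiffCostFunction<ResidualRGBDPhotoconsistency,ceres::DYNAMIC,6>(
new ResidualRGBDPhotoconsistency,numResiduals);
problem.AddResidualBlock(costFunction,NULL,x);
*/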
class VisualizationCallback: public ceres::IterationCallback {
public:
virtual ceres::CallbackReturnType operator()(const ceres::IterationSummary& summary)
{
Eigen::Matrix3f cameraMatrix_eigen;
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
{
cameraMatrix_eigen(i,j)=cameraMatrix[i][j];
}
}
Eigen::Matrix4f Rt;
eigenPose(x[0],x[1],x[2],x[3],x[4],x[5],Rt);
std::cout<<"Rt eigen:"<<std::endl<<Rt<<std::endl;
cv::Mat warpedImage;
warpImage<float>(gray0Pyr[0],depth0Pyr[0],warpedImage,Rt,cameraMatrix_eigen);
cv::Mat imgDiff;
cv::absdiff(gray1Pyr[0],warpedImage,imgDiff);
cv::imshow("callback: imgDiff",imgDiff);
cv::waitKey(5);
return ceres::SOLVER_CONTINUE;
}
};
void buildPyramid(cv::Mat & img,std::vector<cv::Mat>& pyramid,int levels,bool applyBlur)
{
//Create space for all the images
pyramid.resize(levels);
double factor = 1;
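//The scale factor halves at every level, so level k is downscaled by 2^-k
//(e.g. levels=3 yields scales 1, 1/2 and 1/4 of the input image)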
for(int level=0;level<levels;level++)
{
//Create an auxiliary image of factor times the size of the original image
cv::Mat imgAux;
if(level!=0)
{
cv::resize(img,imgAux,cv::Size(0,0),factor,factor);
}
else
{
imgAux = img;
}
//Blur the resized image with different filter size depending on the current pyramid level
if(applyBlur)
{
#if ENABLE_GAUSSIAN_BLUR
if(blurFilterSize[level]>0)
{
cv::GaussianBlur(imgAux,imgAux,cv::Size(blurFilterSize[level],blurFilterSize[level]),3);
cv::GaussianBlur(imgAux,imgAux,cv::Size(blurFilterSize[level],blurFilterSize[level]),3);
}
#elif ENABLE_BOX_FILTER_BLUR
if(blurFilterSize[level]>0)
{
cv::blur(imgAux,imgAux,cv::Size(blurFilterSize[level],blurFilterSize[level]));
cv::blur(imgAux,imgAux,cv::Size(blurFilterSize[level],blurFilterSize[level]));
}
#endif
}
//Assign the resized image to the current level of the pyramid
pyramid[level]=imgAux;
factor = factor/2;
}
}
void buildDerivativesPyramids(std::vector<cv::Mat>& imagePyramid,std::vector<cv::Mat>& derXPyramid,std::vector<cv::Mat>& derYPyramid)
{
//Compute image gradients
int scale = 1;
int delta = 0;
int ddepth = CV_32FC1;
//Create space for all the derivatives images
derXPyramid.resize(imagePyramid.size());
derYPyramid.resize(imagePyramid.size());
for(int level=0;level<imagePyramid.size();level++)
{
// Compute the gradient in x directly into the destination pyramid level
cv::Scharr( imagePyramid[level], derXPyramid[level], ddepth, 1, 0, imageGradientsScalingFactor[level], delta, cv::BORDER_DEFAULT );
// Compute the gradient in y directly into the destination pyramid level
cv::Scharr( imagePyramid[level], derYPyramid[level], ddepth, 0, 1, imageGradientsScalingFactor[level], delta, cv::BORDER_DEFAULT );
}
}
public:
CPhotoconsistencyOdometryCeres() {}
~CPhotoconsistencyOdometryCeres() {}
/*!Sets the 3x3 matrix of (pinhole) camera intrinsic parameters used to obtain the 3D colored point cloud from the RGB and depth images.*/
void setCameraMatrix(Eigen::Matrix3f & camMat)
{
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
{
cameraMatrix[i][j]=camMat(i,j);
}
}
}
/*!Sets the source (Intensity+Depth) frame.*/
void setSourceFrame(cv::Mat & imgGray,cv::Mat & imgDepth)
{
//Create a float auxiliary image from the input image
cv::Mat imgGrayFloat;
imgGray.convertTo(imgGrayFloat, CV_32FC1, 1./255 );
//Compute image pyramids for the grayscale and depth images
buildPyramid(imgGrayFloat,gray0Pyr,numOptimizationLevels,true);
buildPyramid(imgDepth,depth0Pyr,numOptimizationLevels,false);
}
/*!Sets the target (Intensity) frame. The depth image is ignored.*/
void setTargetFrame(cv::Mat & imgGray,cv::Mat & imgDepth)
{
//Create a float auxiliary image from the input image
cv::Mat imgGrayFloat;
imgGray.convertTo(imgGrayFloat, CV_32FC1, 1./255 );
//Compute image pyramids for the grayscale and depth images
buildPyramid(imgGrayFloat,gray1Pyr,numOptimizationLevels,true);
//Compute image pyramids for the gradients images
buildDerivativesPyramids(gray1Pyr,gray1GradXPyr,gray1GradYPyr);
}
/*!Initializes the state vector to a certain value. The optimization process uses the initial state vector as the initial estimate.*/
void setInitialStateVector(const std::vector<double> & initialStateVector)
{
x[0] = initialStateVector[0];
x[1] = initialStateVector[1];
x[2] = initialStateVector[2];
x[3] = initialStateVector[3];
x[4] = initialStateVector[4];
x[5] = initialStateVector[5];
}
/*!Launches the least-squares optimization process to find the configuration of the state vector parameters that maximizes the photoconsistency between the source and target frame.*/
void optimize()
{
for(optimizationLevel = numOptimizationLevels-1;optimizationLevel>=0;optimizationLevel--)
{
// Build the problem.
Problem problem;
// Set up the only cost function (also known as residual). This uses
// auto-differentiation to obtain the derivative (jacobian).
problem.AddResidualBlock(
new AutoDiffCostFunction<ResidualRGBDPhotoconsistency,ceres::DYNAMIC,6>(
new ResidualRGBDPhotoconsistency,
gray0Pyr[optimizationLevel].cols*gray0Pyr[optimizationLevel].rows /*dynamic size*/),
NULL,
x);
// Run the solver!
Solver::Options options;
options.max_num_iterations = max_num_iterations[optimizationLevel];
options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;//ceres::DENSE_QR;
options.minimizer_progress_to_stdout = minimizer_progress_to_stdout;
options.function_tolerance = function_tolerance[optimizationLevel];
options.gradient_tolerance = gradient_tolerance[optimizationLevel];
options.parameter_tolerance = parameter_tolerance[optimizationLevel];
options.initial_trust_region_radius = initial_trust_region_radius[optimizationLevel];
options.max_trust_region_radius = max_trust_region_radius[optimizationLevel];
options.min_trust_region_radius = min_trust_region_radius[optimizationLevel];
options.min_relative_decrease = min_relative_decrease[optimizationLevel];
options.num_linear_solver_threads = num_linear_solver_threads;
options.num_threads = num_threads;
options.max_num_consecutive_invalid_steps = 0;
VisualizationCallback callback;
if(visualizeIterations)
{
options.update_state_every_iteration = true;
options.callbacks.push_back(&callback);
}
else
{
options.update_state_every_iteration = false;
}
Solver::Summary summary;
Solve(options, &problem, &summary);
std::cout << summary.BriefReport() << "\n";
}
//After the optimization process finishes, the optimization level is 0
optimizationLevel = 0;
}
/*!Returns the optimal state vector. This method has to be called after calling the optimize() method.*/
void getOptimalStateVector(std::vector<double> & optimalStateVector)
{
optimalStateVector[0] = x[0];
optimalStateVector[1] = x[1];
optimalStateVector[2] = x[2];
optimalStateVector[3] = x[3];
optimalStateVector[4] = x[4];
optimalStateVector[5] = x[5];
}
/*!Returns the optimal 4x4 rigid transformation matrix between the source and target frame. This method has to be called after calling the optimize() method.*/
void getOptimalRigidTransformationMatrix(Eigen::Matrix4f & optimal_Rt)
{
eigenPose(x[0],x[1],x[2],
x[3],x[4],x[5],optimal_Rt);
}
/*!Reads the configuration parameters from a .yml file.*/
void readConfigurationFile(std::string fileName)
{
cv::FileStorage fs(fileName, cv::FileStorage::READ);
//Read the number of optimization levels
fs["numOptimizationLevels"] >> numOptimizationLevels;
#if ENABLE_GAUSSIAN_BLUR || ENABLE_BOX_FILTER_BLUR
//Read the blur filter size at every pyramid level
fs["blurFilterSize (at each level)"] >> blurFilterSize;
#endif
//Read the scaling factor for each gradient image at each level
fs["imageGradientsScalingFactor (at each level)"] >> imageGradientsScalingFactor;
//Read the number of Levenberg-Marquardt iterations at each optimization level
fs["max_num_iterations (at each level)"] >> max_num_iterations;
//Read optimizer function tolerance at each level
fs["function_tolerance (at each level)"] >> function_tolerance;
//Read optimizer gradient tolerance at each level
fs["gradient_tolerance (at each level)"] >> gradient_tolerance;
//Read optimizer parameter tolerance at each level
fs["parameter_tolerance (at each level)"] >> parameter_tolerance;
//Read optimizer initial trust region at each level
fs["initial_trust_region_radius (at each level)"] >> initial_trust_region_radius;
//Read optimizer max trust region radius at each level
fs["max_trust_region_radius (at each level)"] >> max_trust_region_radius;
//Read optimizer min trust region radius at each level
fs["min_trust_region_radius (at each level)"] >> min_trust_region_radius;
//Read optimizer min LM relative decrease at each level
fs["min_relative_decrease (at each level)"] >> min_relative_decrease;
//Read the number of threads for the linear solver
fs["num_linear_solver_threads"] >> num_linear_solver_threads;
//Read the number of threads for the jacobian computation
fs["num_threads"] >> num_threads;
//Read the boolean value to determine if print the minimization progress or not
fs["minimizer_progress_to_stdout"] >> minimizer_progress_to_stdout;
//Read the boolean value to determine if visualize the progress images or not
fs["visualizeIterations"] >> visualizeIterations;
}
};
} //end namespace Ceres
} //end namespace PhotoconsistencyOdometry
#endif
//#endif // Check for Ceres-solver
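//Usage sketch (illustrative only, not part of the original header; it assumes
//grayscale/depth cv::Mat frames and an Eigen::Matrix3f camera matrix are
//already available, and the variable names below are placeholders):
//
//  PhotoconsistencyOdometry::Ceres::CPhotoconsistencyOdometryCeres odometry;
//  odometry.readConfigurationFile("ceres_config.yml");
//  odometry.setCameraMatrix(cameraMatrix);
//  odometry.setSourceFrame(gray0, depth0);             //intensity + depth
//  odometry.setTargetFrame(gray1, depth1);             //depth is ignored
//  odometry.setInitialStateVector({0, 0, 0, 0, 0, 0}); //six pose parameters
//  odometry.optimize();
//  Eigen::Matrix4f Rt;
//  odometry.getOptimalRigidTransformationMatrix(Rt);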
|
sparseraster.h | #pragma once
#include "gdx/cell.h"
#include "gdx/cpupredicates-private.h"
#include "gdx/exception.h"
#include "gdx/nodatapredicates-private.h"
#include "gdx/rasterchecks.h"
#include "gdx/rastermetadata.h"
#include "gdx/sparserasteriterator.h"
#include "infra/cast.h"
#include "infra/span.h"
#include <Eigen/SparseCore>
#include <algorithm>
#include <cassert>
#include <vector>
namespace gdx {
template <typename T>
class SparseRaster
{
public:
using value_type = T;
using size_type = int32_t;
using data_type = Eigen::SparseMatrix<T, Eigen::RowMajor>;
using nodata_type = std::optional<value_type>;
using pointer = T*;
using const_pointer = const T*;
using iterator = SparseMatrixIterator<T, false>;
using const_iterator = SparseMatrixIterator<T, true>;
static constexpr bool raster_type_has_nan = std::numeric_limits<T>::has_quiet_NaN;
static constexpr T NaN = std::numeric_limits<T>::quiet_NaN();
static constexpr bool has_nan()
{
return raster_type_has_nan;
}
SparseRaster() = default;
SparseRaster(int32_t rows, int32_t cols)
: _meta(rows, cols)
, _data(rows, cols)
{
}
SparseRaster(RasterMetadata meta)
: _meta(std::move(meta))
, _data(_meta.rows, _meta.cols)
{
throwOnInvalidMetadata();
}
SparseRaster(int32_t rows, int32_t cols, T fillValue)
: SparseRaster(RasterMetadata(rows, cols), fillValue)
{
}
SparseRaster(const RasterMetadata& meta, T fillValue)
: _meta(meta)
, _data(meta.rows, meta.cols)
{
if constexpr (raster_type_has_nan) {
// make sure we fill the raster with NaNs if the fill value is the nodata value
if (_meta.nodata.has_value() && fillValue == static_cast<T>(*_meta.nodata)) {
fillValue = NaN;
}
}
fill(fillValue);
}
SparseRaster(int32_t rows, int32_t cols, std::span<const T> data)
: SparseRaster(RasterMetadata(rows, cols), data)
{
}
SparseRaster(const RasterMetadata& meta, std::span<const T> data)
: _meta(meta)
, _data(meta.rows, meta.cols)
{
throwOnInvalidMetadata();
throwOnDataSizeMismatch(meta.rows, meta.cols, data.size());
initMatrixValues(data);
}
SparseRaster(const RasterMetadata& meta, data_type&& data)
: _meta(meta)
, _data(data)
{
}
SparseRaster(const SparseRaster<T>& other)
: _meta(other._meta)
, _data(other._data)
{
fmt::print("!! Raster copy: should not happen !!");
}
SparseRaster(SparseRaster<T>&&) = default;
SparseRaster& operator=(SparseRaster<T>&&) = default;
SparseRaster& operator=(const SparseRaster<T>& other)
{
if (this != &other) {
_meta = other._meta;
_data = other._data;
}
return *this;
}
void resize_and_fill(int32_t rows, int32_t cols, value_type value)
{
resize(rows, cols);
fill(value);
}
void resize(int32_t rows, int32_t cols)
{
_meta.rows = rows;
_meta.cols = cols;
_data.resize(rows, cols);
}
void resize(int32_t rows, int32_t cols, std::optional<double> nodata)
{
_meta.rows = rows;
_meta.cols = cols;
_meta.nodata = nodata;
_data.resize(rows, cols);
throwOnInvalidMetadata();
}
void set_metadata(RasterMetadata meta)
{
if (static_cast<std::size_t>(meta.rows) * meta.cols != size()) {
throw InvalidArgument("Cannot change metadata: invalid size");
}
_meta = std::move(meta);
}
SparseRaster<T> copy() const
{
SparseRaster<T> dst(_meta);
dst._data = _data;
return dst;
}
iterator begin()
{
return iterator(_data, nodata().value_or(std::numeric_limits<T>::max()));
}
const_iterator begin() const
{
return const_iterator(_data, nodata().value_or(std::numeric_limits<T>::max()));
}
const_iterator cbegin() const
{
return begin();
}
iterator end()
{
return iterator();
}
const_iterator end() const
{
return const_iterator();
}
const_iterator cend() const
{
return end();
}
auto value_begin()
{
return SparseMatrixValueIterator<value_type, false>(_data);
}
auto value_begin() const
{
return SparseMatrixValueIterator<value_type, true>(_data);
}
auto value_end()
{
return SparseMatrixValueIterator<value_type, false>();
}
auto value_end() const
{
return SparseMatrixValueIterator<value_type, true>();
}
auto value_cend()
{
return SparseMatrixValueIterator<value_type, true>();
}
auto value_cbegin() const
{
return value_begin();
}
auto value_cend() const
{
return value_end();
}
bool has_nodata() const noexcept
{
// absent entries represent nodata, so the raster contains nodata cells
// unless every single cell has a stored value
return static_cast<std::size_t>(_data.nonZeros()) != size();
}
std::optional<T> nodata() const noexcept
{
return inf::optional_cast<T>(_meta.nodata);
}
std::size_t size() const noexcept
{
return _data.size();
}
std::ptrdiff_t ssize() const noexcept
{
assert(_data.size() <= std::numeric_limits<std::ptrdiff_t>::max());
return static_cast<std::ptrdiff_t>(_data.size());
}
void collapse_data()
{
// no collapse needed
}
const RasterMetadata& metadata() const noexcept
{
return _meta;
}
void set_projection(int32_t epsg)
{
_meta.set_projection_from_epsg(epsg);
}
void set_nodata(double newValue)
{
if constexpr (!raster_type_has_nan) {
if (std::isnan(newValue)) {
throw InvalidArgument("Nodata value cannot be NaN for integral rasters");
}
}
_meta.nodata = newValue;
}
/*void replaceNodata(double newValue)
{
if constexpr (!raster_type_has_nan) {
if (std::isnan(newValue)) {
throw InvalidArgument("Integral rasters cannot have NaN values");
}
}
}*/
/*void turn_value_into_nodata(T value)
{
const auto dataSize = _data.size();
for (int i = 0; i < dataSize; ++i) {
if (_data(i) == value) {
mark_as_nodata(i);
}
}
}*/
// assigns the value to all the elements of the raster, even nodata
void fill(value_type value)
{
// minimal dense fill expressed as triplets: every cell gets a stored value
std::vector<Eigen::Triplet<T>> triplets;
triplets.reserve(static_cast<size_t>(_meta.rows) * _meta.cols);
for (int32_t r = 0; r < _meta.rows; ++r)
for (int32_t c = 0; c < _meta.cols; ++c)
triplets.emplace_back(r, c, value);
_data.setFromTriplets(triplets.begin(), triplets.end());
}
// assigns the value to all the elements of the raster, leaving nodata values intact
void fill_values(value_type value)
{
// only stored entries carry data; absent entries remain nodata
for (int k = 0; k < _data.outerSize(); ++k)
for (typename data_type::InnerIterator it(_data, k); it; ++it)
it.valueRef() = value;
}
// Makes all elements of the raster nodata values
void fill_with_nodata()
{
_data.setZero();
}
int32_t rows() const noexcept
{
assert(_meta.rows == _data.rows());
return _meta.rows;
}
int32_t cols() const noexcept
{
assert(_meta.cols == _data.cols());
return _meta.cols;
}
void mark_as_data(int32_t /*index*/) noexcept
{
}
void mark_as_data(Cell /*cell*/) noexcept
{
}
void mark_as_data(int32_t /*row*/, int32_t /*col*/) noexcept
{
}
void mark_as_nodata(int32_t index)
{
auto [r, c] = indexToRowCol(index);
mark_as_nodata(r, c);
}
void mark_as_nodata(int32_t row, int32_t col)
{
_data.prune([=](const int32_t& r, const int32_t& c, const T& /*value*/) {
return row == r && col == c;
});
}
std::optional<value_type> optional_value(int32_t index) const noexcept
{
if (is_nodata(index)) {
return std::optional<value_type>();
} else {
auto [row, col] = indexToRowCol(index);
return _data.coeff(row, col);
}
}
template <typename VarType>
std::optional<VarType> optional_value_as(int32_t index) const noexcept
{
if (is_nodata(index)) {
return std::optional<VarType>();
} else {
return static_cast<VarType>((*this)[index]);
}
}
bool is_nodata_value(T value) const noexcept
{
return value == nodata();
}
bool is_nodata(int32_t index) const noexcept
{
auto [row, col] = indexToRowCol(index);
return is_nodata(row, col);
}
bool is_nodata(const Cell& cell) const noexcept
{
return is_nodata(cell.r, cell.c);
}
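// Note: absent (not stored) entries represent nodata. The lookup below walks
// row r of Eigen's compressed storage directly (outerIndexPtr/innerNonZeroPtr)
// and yields the nodata sentinel when column c has no stored entry.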
bool is_nodata(int32_t r, int32_t c) const noexcept
{
auto* innerNonZeros = _data.innerNonZeroPtr();
auto* outerIndex = _data.outerIndexPtr();
auto nod = nodata().value();
auto end = innerNonZeros ? outerIndex[r] + innerNonZeros[r] : outerIndex[r + 1];
return _data.data().atInRange(outerIndex[r], end, typename data_type::StorageIndex(c), nod) == nod;
}
// bool tolerant_equal_to(const SparseRaster<T>& other, value_type tolerance = std::numeric_limits<value_type>::epsilon()) const noexcept
// {
// if (_meta != other._meta) {
// return false;
// }
// return tolerant_data_equal_to(other, tolerance);
// }
// bool tolerant_data_equal_to(const SparseRaster<T>& other, value_type relTolerance = value_type(1e-05)) const noexcept
// {
// throw_on_size_mismatch(*this, other);
// return _data == other._data;
// }
bool operator==(const SparseRaster<T>& other) const noexcept
{
throw_on_size_mismatch(*this, other);
return (_data - other._data).norm() == 0;
}
bool operator!=(const SparseRaster<T>& other) const noexcept
{
return !(*this == other);
}
/*SparseRaster<uint8_t> not_equals(const SparseRaster<T>& other) const noexcept
{
throw_on_size_mismatch(*this, other);
return performBinaryOperation<nodata::not_equal_to>(other);
}
template <typename TValue>
SparseRaster<uint8_t> not_equals(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
return performUnaryOperation<nodata::not_equal_to>(value);
}*/
template <typename TOther>
auto operator+(const SparseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
return SparseRaster(_meta, _data + other._data);
}
template <typename TValue>
auto operator+(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
SparseRaster rasterCopy = copy();
rasterCopy += value;
return rasterCopy;
}
SparseRaster<T>& operator+=(T value)
{
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
std::for_each(value_begin(), value_end(), [=](T& cellValue) {
cellValue += static_cast<T>(value);
});
return *this;
}
template <typename TOther>
SparseRaster<T>& operator+=(const SparseRaster<TOther>& other)
{
_data += other._data;
return *this;
}
SparseRaster<T> operator-() const
{
if constexpr (std::is_unsigned_v<T>) {
throw RuntimeError("Minus operator applied to unsigned value");
} else {
return SparseRaster<T>(_meta, -_data);
}
}
template <typename TOther>
auto operator-(const SparseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
return SparseRaster<T>(_meta, _data - other._data);
}
template <typename TValue>
auto operator-(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
SparseRaster rasterCopy = copy();
rasterCopy -= value;
return rasterCopy;
}
SparseRaster<T>& operator-=(T value)
{
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
std::for_each(value_begin(), value_end(), [=](T& cellValue) {
cellValue -= static_cast<T>(value);
});
return *this;
}
template <typename TOther>
auto operator*(const SparseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
return SparseRaster<T>(_meta, _data * other._data);
}
template <typename TValue>
auto operator*(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
return SparseRaster(_meta, _data * static_cast<T>(value));
}
SparseRaster<T>& operator*=(T value)
{
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
_data *= value;
return *this;
}
//template <typename TOther>
//auto operator/(const SparseRaster<TOther>& other) const
//{
// throw_on_size_mismatch(*this, other);
// using TResult = decltype(0.f * TOther()); // use float or double as result type
// SparseRaster<TResult> result(_meta);
// if (!_meta.nodata.has_value() && other.metadata().nodata.has_value()) {
// result.set_nodata(*other.metadata().nodata);
// }
// if (!result.nodata().has_value()) {
// result.set_nodata(std::numeric_limits<TResult>::quiet_NaN());
// }
// TResult nodata = result.nodata().value();
// if constexpr (std::numeric_limits<TResult>::has_quiet_NaN) {
// nodata = std::numeric_limits<TResult>::quiet_NaN();
// }
// auto operation = nodata::divides<TResult>(_meta.nodata, other.metadata().nodata);
// for (int32_t i = 0; i < size(); ++i) {
// auto v = other[i];
// if (v == 0) {
// result[i] = nodata;
// } else {
// if (is_nodata(i) || other.is_nodata(i)) {
// result[i] = nodata;
// } else {
// result[i] = static_cast<TResult>(_data(i)) / other[i];
// }
// }
// }
// return result;
//}
template <typename TValue>
auto operator/(TValue value) const
{
static_assert(std::is_scalar_v<TValue>, "Arithmetic operation called with non scalar type");
if (value == 0) {
throw InvalidArgument("Division by zero");
}
return SparseRaster(_meta, _data / static_cast<T>(value));
}
SparseRaster<T>& operator/=(T value)
{
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
_data /= value;
return *this;
}
value_type& operator[](int32_t index)
{
auto [row, col] = indexToRowCol(index);
return (*this)(row, col);
}
value_type operator[](int32_t index) const
{
auto [row, col] = indexToRowCol(index);
return (*this)(row, col);
}
value_type& operator[](const Cell& cell)
{
return (*this)(cell.r, cell.c);
}
value_type operator[](const Cell& cell) const
{
return (*this)(cell.r, cell.c);
}
value_type& operator()(int32_t row, int32_t col)
{
return _data.coeffRef(row, col);
}
value_type operator()(int32_t row, int32_t col) const
{
return _data.coeff(row, col);
}
/*SparseRaster<uint8_t> operator!() const
{
return performUnaryOperation<nodata::logical_not>();
}
template <typename TOther>
SparseRaster<uint8_t> operator&&(const SparseRaster<TOther>& other) const
{
return performBinaryOperation<nodata::logical_and>(other);
}
template <typename TOther>
SparseRaster<uint8_t> operator||(const SparseRaster<TOther>& other) const
{
return performBinaryOperation<nodata::logical_or>(other);
}
template <typename TOther>
SparseRaster<uint8_t> operator>(const SparseRaster<TOther>& other) const
{
return performBinaryOperation<nodata::greater>(other);
}
SparseRaster<uint8_t> operator>(T threshold) const
{
return performUnaryOperation<nodata::greater>(threshold);
}
template <typename TOther>
SparseRaster<uint8_t> operator>=(const SparseRaster<TOther>& other) const
{
return performBinaryOperation<nodata::greater_equal>(other);
}
SparseRaster<uint8_t> operator>=(T threshold) const
{
return performUnaryOperation<nodata::greater_equal>(threshold);
}
template <typename TOther>
SparseRaster<uint8_t> operator<(const SparseRaster<TOther>& other) const
{
return performBinaryOperation<nodata::less>(other);
}
SparseRaster<uint8_t> operator<(T threshold) const
{
return performUnaryOperation<nodata::less>(threshold);
}
template <typename TOther>
SparseRaster<uint8_t> operator<=(const SparseRaster<TOther>& other) const
{
return performBinaryOperation<nodata::less_equal>(other);
}
SparseRaster<uint8_t> operator<=(T threshold) const
{
return performUnaryOperation<nodata::less_equal>(threshold);
}*/
void replace(T oldValue, T newValue) noexcept
{
std::replace(begin(), end(), oldValue, newValue);
}
std::string to_string() const
{
std::ostringstream ss;
ss << _data;
return ss.str();
}
private:
std::tuple<int32_t, int32_t> indexToRowCol(int32_t index) const
{
int row = index / inf::truncate<int>(_data.cols());
int col = index - (row * inf::truncate<int>(_data.cols()));
return {row, col};
}
void initMatrixValues(std::span<const T> data)
{
assert(nodata().has_value());
const T nod = nodata().value();
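// build the sparse matrix in one pass: cells equal to the nodata sentinel
// are simply not inserted, everything else becomes a stored triplet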
std::vector<Eigen::Triplet<T>> tripletList;
for (int r = 0; r < _meta.rows; ++r) {
const int rowStart = r * _meta.cols;
for (int c = 0; c < _meta.cols; ++c) {
if (data[rowStart + c] != nod) {
tripletList.push_back(Eigen::Triplet<T>(r, c, data[rowStart + c]));
}
}
}
_data.setFromTriplets(tripletList.begin(), tripletList.end());
}
void throwOnInvalidMetadata()
{
if (!_meta.nodata.has_value()) {
throw RuntimeError("Sparse rasters must have a nodata value");
}
}
static void throwOnDataSizeMismatch(int32_t rows, int32_t cols, size_t dataSize)
{
if (static_cast<size_t>(rows) * static_cast<size_t>(cols) != dataSize) {
throw InvalidArgument("Raster data size does not match provided dimensions {} vs {}x{}", dataSize, rows, cols);
}
}
// Performs a unary operation on all the elements that results in true or false
template <template <typename> typename BinaryPredicate, typename TOther>
SparseRaster<uint8_t> perform_unary_operation(TOther value) const
{
SparseRaster<uint8_t> result(_meta);
if (_meta.nodata.has_value()) {
result.set_nodata(static_cast<double>(std::numeric_limits<uint8_t>::max()));
}
auto pred = BinaryPredicate<T>(_meta.nodata, std::optional<double>());
const auto size = result.size();
#pragma omp parallel for
for (std::size_t i = 0; i < size; ++i) {
result[i] = pred((*this)[static_cast<int32_t>(i)], static_cast<T>(value));
}
return result;
}
template <template <typename> typename UnaryPredicate>
SparseRaster<uint8_t> performUnaryOperation() const
{
SparseRaster<uint8_t> result(_meta);
if (_meta.nodata) {
result.set_nodata(static_cast<double>(std::numeric_limits<uint8_t>::max()));
}
std::transform(cbegin(), cend(), result.begin(), UnaryPredicate<T>(_meta.nodata));
return result;
}
template <template <typename> typename BinaryPredicate, typename TOther>
SparseRaster<uint8_t> perform_binary_operation(const SparseRaster<TOther>& other) const
{
throw_on_size_mismatch(*this, other);
using WidestType = decltype(T() * TOther());
SparseRaster<uint8_t> result(_meta);
if (_meta.nodata.has_value() || other.metadata().nodata.has_value()) {
result.set_nodata(std::numeric_limits<uint8_t>::max());
}
auto pred = BinaryPredicate<WidestType>(_meta.nodata, other.metadata().nodata);
const auto size = result.size();
#pragma omp parallel for
for (std::size_t i = 0; i < size; ++i) {
result[i] = pred(static_cast<WidestType>((*this)[static_cast<int32_t>(i)]), static_cast<WidestType>(other[i]));
}
return result;
}
template <template <typename> typename UnaryPredicate, typename TScalar>
auto perform_scalar_operation(TScalar scalar) const
{
using WidestType = decltype(T() * TScalar());
auto pred = UnaryPredicate<WidestType>(_meta.nodata, static_cast<WidestType>(scalar));
SparseRaster<WidestType> result(_meta);
std::transform(cbegin(), cend(), result.begin(), [this, pred](T value) {
if (is_nodata_value(value)) {
return value;
}
return pred(value);
});
return result;
}
template <template <typename> typename BinaryPredicate, typename TOther>
auto perform_raster_operation(const SparseRaster<TOther>& other) const
{
using WidestType = decltype(T() * TOther());
SparseRaster<WidestType> result(_meta);
if (!_meta.nodata.has_value() && other.metadata().nodata.has_value()) {
result.set_nodata(*other.metadata().nodata);
}
auto operation = BinaryPredicate<WidestType>();
auto nodata = result.nodata().value_or(0);
if constexpr (std::numeric_limits<WidestType>::has_quiet_NaN) {
nodata = std::numeric_limits<WidestType>::quiet_NaN();
}
#pragma omp parallel for
for (std::size_t i = 0; i < size(); ++i) {
if (is_nodata(i) || other.is_nodata(i)) {
result[i] = nodata;
} else {
result[i] = operation(static_cast<WidestType>((*this)[static_cast<int32_t>(i)]), static_cast<WidestType>(other[i]));
}
}
return result;
}
RasterMetadata _meta;
data_type _data;
};
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
SparseRaster<T> operator+(TScalar lhs, const SparseRaster<T>& rhs)
{
return rhs + lhs;
}
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
auto operator-(TScalar value, const SparseRaster<T>& rhs)
{
using ResultType = decltype(TScalar() - T());
SparseRaster<ResultType> result(rhs.metadata());
std::transform(begin(rhs), end(rhs), begin(result), nodata::minus_scalar_first<ResultType>(rhs.metadata().nodata, static_cast<ResultType>(value)));
return result;
}
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
SparseRaster<T> operator*(TScalar lhs, const SparseRaster<T>& rhs)
{
return rhs * lhs;
}
template <typename TScalar, typename T, typename = std::enable_if_t<std::is_scalar_v<TScalar>>>
auto operator/(TScalar scalar, const SparseRaster<T>& rhs)
{
//throw_on_size_mismatch(other);
//// For nan nodata, standard eigen operator can be used
//if constexpr (typeHasNaN() && std::is_same_v<T, TOther>) {
// // all types are the same, no casts needed
// return SparseRaster<T>(_meta, _data / other._data);
//}
//return performRasterOperation<nodata::divides>(other);
using ResultType = decltype(1.0f * T());
static_assert(std::is_scalar_v<T>, "Arithmetic operation called with non scalar type");
SparseRaster<ResultType> result(rhs.metadata());
for (std::size_t i = 0; i < rhs.size(); ++i) {
auto value = rhs[i];
if (value == 0) {
if (!result.nodata().has_value()) {
throw InvalidArgument("Division by raster that contains 0 values");
}
result.mark_as_nodata(i);
} else {
result[i] = scalar / static_cast<ResultType>(value);
}
}
return result;
}
template <typename T>
auto cbegin(const SparseRaster<T>& ras)
{
return ras.cbegin();
}
template <typename T>
auto cend(const SparseRaster<T>& ras)
{
return ras.cend();
}
template <typename T>
auto begin(SparseRaster<T>& ras)
{
return ras.begin();
}
template <typename T>
auto begin(const SparseRaster<T>& ras)
{
return ras.begin();
}
template <typename T>
auto end(SparseRaster<T>& ras)
{
return ras.end();
}
template <typename T>
auto end(const SparseRaster<T>& ras)
{
return ras.cend();
}
template <typename T>
auto size(const SparseRaster<T>& ras)
{
return ras.size();
}
template <typename T>
auto value_cbegin(const SparseRaster<T>& ras)
{
return ras.value_cbegin();
}
template <typename T>
auto value_cend(const SparseRaster<T>& ras)
{
return ras.value_cend();
}
template <typename T>
auto value_begin(SparseRaster<T>& ras)
{
return ras.value_begin();
}
template <typename T>
auto value_begin(const SparseRaster<T>& ras)
{
return ras.value_begin();
}
template <typename T>
auto value_end(SparseRaster<T>& ras)
{
return ras.value_end();
}
template <typename T>
auto value_end(const SparseRaster<T>& ras)
{
return ras.value_cend();
}
}
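// Usage sketch (illustrative, not part of the original header; the nodata
// value and cell values below are placeholders):
//
//   gdx::RasterMetadata meta(2, 3);
//   meta.nodata = -9999.0;
//   std::vector<float> values = {1.f, -9999.f, 3.f, 4.f, 5.f, -9999.f};
//   gdx::SparseRaster<float> ras(meta, std::span<const float>(values));
//   assert(ras.is_nodata(0, 1));   // cells equal to nodata are not stored
//   auto shifted = ras + 1.0f;     // scalar arithmetic returns a new raster
//   ras.mark_as_nodata(0, 0);      // prunes the stored entry at (0, 0)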
|
rnn_impl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file rnn_impl.h
* \brief
* \author Shu Zhang
*/
#ifndef MXNET_OPERATOR_RNN_IMPL_H_
#define MXNET_OPERATOR_RNN_IMPL_H_
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <algorithm>
#include <random>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include "./math.h"
#include "./math_functions-inl.h"
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./linalg.h"
namespace mxnet {
namespace op {
template <typename DType>
inline DType sigmoid(DType x) {
return 1.0f / (1.0f + exp(-x));
}
template <typename DType>
inline DType relu(DType x) {
return x > static_cast<DType>(0.0f) ? x : static_cast<DType>(0.0f);
}
template <typename DType>
void LstmForwardTrainingSingleLayer(DType* ws,
DType* rs,
bool state_outputs,
bool bid,
const index_t T,
const index_t N,
const index_t I,
const int H,
const Tensor<cpu, 2, DType>& x,
const Tensor<cpu, 2, DType>& hx,
const Tensor<cpu, 2, DType>& cx,
const Tensor<cpu, 3, DType>& y,
DType* w_ptr,
DType* b_ptr,
DType* hy_ptr,
DType* cy_ptr) {
using namespace mshadow;
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H));
const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H));
const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
DType* c_ptr = bid ? rs + T * N * H * 7 : rs;
Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
const int offset = bid ? H : 0;
const DType alpha = 1.0;
const DType beta = 0.0;
const index_t cell_size = N * H;
linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (index_t i = 0; i < T; ++i) {
index_t t = bid ? T - 1 - i : i;
linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
#pragma omp parallel for num_threads(omp_threads)
for (index_t jk = 0; jk < cell_size; ++jk) {
index_t j = jk / H;
index_t k = jk % H;
DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
DType ct = (i ? c[i - 1][j][k] : cx[j][k]) * ft + it * gt;
DType ht = ot * tanh(ct);
h[j][k] = ht;
// reserve
y[t][j][k + offset] = ht;
c[i][j][k] = ct;
ifgo[i][j][k][0] = it;
ifgo[i][j][k][1] = ft;
ifgo[i][j][k][2] = gt;
ifgo[i][j][k][3] = ot;
if (i == T - 1 && state_outputs) {
hy_ptr[jk] = ht;
cy_ptr[jk] = ct;
}
}
}
}
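// For reference, the inner loop above implements the standard LSTM cell:
//   i_t = sigmoid(x_t*Wx_i + h_{t-1}*Wh_i + bx_i + bh_i)
//   f_t = sigmoid(x_t*Wx_f + h_{t-1}*Wh_f + bx_f + bh_f)
//   g_t = tanh   (x_t*Wx_g + h_{t-1}*Wh_g + bx_g + bh_g)
//   o_t = sigmoid(x_t*Wx_o + h_{t-1}*Wh_o + bx_o + bh_o)
//   c_t = f_t * c_{t-1} + i_t * g_t
//   h_t = o_t * tanh(c_t)
// with c_t and the gate activations (ifgo) saved to the reserved space for
// the backward pass.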
template <typename DType>
void LstmForwardTraining(DType* ws,
DType* rs,
bool state_outputs,
const int L,
const int D,
const index_t T,
const index_t N,
const index_t I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* b_ptr,
DType* y_ptr,
DType* hy_ptr,
DType* cy_ptr,
const float dropout,
std::mt19937& rnd_engine) { // NOLINT(runtime/references)
DType* dropout_random = rs;
DType* rs2 = dropout_random + (L - 1) * D * T * N * H;
const int total_layers = D * L;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
const index_t b_size = 2 * H * 4;
const index_t r_size = D * T * N * H * 6;
const index_t y_offset = T * N * H * 5;
const index_t cell_size = N * H;
int idx = 0; // state & cell state's idx;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
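// Per-layer reserved-space layout (per direction): cell states c (T*N*H)
// followed by the four gate activations ifgo (T*N*H*4); the layer output y
// lives at offset T*N*H*5, for r_size = D*T*N*H*6 per layer in total.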
for (int i = 0; i < L; ++i) {
const index_t input_size = i ? H * D : I;
const index_t w_size = (input_size + H) * H * 4;
Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
Tensor<cpu, 3, DType> y(rs2 + y_offset, Shape3(T, N, H * D));
LstmForwardTrainingSingleLayer<DType>(ws,
rs2,
state_outputs,
false,
T,
N,
input_size,
H,
x,
hx[idx],
cx[idx],
y,
w_ptr,
b_ptr,
hy_ptr,
cy_ptr);
if (D == 2) {
w_ptr += w_size;
b_ptr += b_size;
++idx;
if (state_outputs) {
hy_ptr += cell_size;
cy_ptr += cell_size;
}
LstmForwardTrainingSingleLayer<DType>(ws,
rs2,
state_outputs,
true,
T,
N,
input_size,
H,
x,
hx[idx],
cx[idx],
y,
w_ptr,
b_ptr,
hy_ptr,
cy_ptr);
}
if (i != L - 1) {
w_ptr += w_size;
b_ptr += b_size;
if (dropout > 0.0f) {
std::uniform_real_distribution<float> distribution(0, 1);
for (index_t j = 0; j < T * N * H * D; j++) {
if (distribution(rnd_engine) < dropout) {
dropout_random[i * T * N * H * D + j] = 0;
y.dptr_[j] = 0;
} else {
dropout_random[i * T * N * H * D + j] = 1.0f - dropout;
y.dptr_[j] = y.dptr_[j] / (1.0f - dropout);
}
}
}
x_ptr = y.dptr_;
rs2 += r_size;
++idx;
if (state_outputs) {
hy_ptr += cell_size;
cy_ptr += cell_size;
}
}
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < T * N * H * D; ++i) {
y_ptr[i] = (rs2 + y_offset)[i];
}
}
template <typename DType>
void LstmForwardInferenceSingleLayer(DType* ws,
bool state_outputs,
bool bid,
const index_t T,
const index_t N,
const index_t I,
const int H,
const int P,
const Tensor<cpu, 2, DType>& x,
const Tensor<cpu, 2, DType>& hx,
const Tensor<cpu, 2, DType>& cx,
const Tensor<cpu, 3, DType>& y,
DType* w_ptr,
DType* b_ptr,
DType* hy_ptr,
DType* cy_ptr) {
using namespace mshadow;
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, (P ? P : H)));
Tensor<cpu, 2, DType> whr(w_ptr, Shape2(1, 1));
if (P > 0)
whr = Tensor<cpu, 2, DType>(wh.dptr_ + P * 4 * H, Shape2(P, H));
const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H));
const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H));
Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4));
Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4));
const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H));
const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H));
Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H));
Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H));
Tensor<cpu, 2, DType> r(hy_ptr, Shape2(1, 1));
if (P > 0)
r = Tensor<cpu, 2, DType>(hy_ptr, Shape2(N, P));
const int offset = bid ? H : 0;
const int proj_offset = bid ? P : 0;
const DType alpha = 1.0;
const DType beta = 0.0;
const index_t cell_size = N * H;
linalg_gemm(x, wx, yx_flat, alpha, beta, false, true);
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (index_t i = 0; i < T; ++i) {
index_t t = bid ? T - 1 - i : i;
if (P > 0) {
linalg_gemm(i ? r : hx, wh, yh_flat, alpha, beta, false, true);
} else {
linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true);
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t jk = 0; jk < cell_size; ++jk) {
int j = jk / H;
int k = jk % H;
DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]);
DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]);
DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]);
DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]);
DType ct = (i ? c[j][k] : cx[j][k]) * ft + it * gt;
DType ht = ot * tanh(ct);
if (P == 0)
y[t][j][k + offset] = ht;
if (i == T - 1 && state_outputs) {
if (P == 0)
hy_ptr[jk] = ht;
cy_ptr[jk] = ct;
} else {
c[j][k] = ct;
}
h[j][k] = ht;
}
if (P > 0) {
linalg_gemm(h, whr, r, alpha, beta, false, true);
#pragma GCC diagnostic push
#if __GNUC__ >= 8
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#endif
#pragma omp parallel for num_threads(omp_threads)
for (int j = 0; j < N; ++j) {
std::memcpy(y[t][j].dptr_ + proj_offset, r[j].dptr_, P * sizeof(DType));
}
#pragma GCC diagnostic pop
}
}
}
template <typename DType>
void LstmForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const index_t T,
const index_t N,
const index_t I,
const int H,
const int P,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* b_ptr,
DType* y_ptr,
DType* hy_ptr,
DType* cy_ptr) {
const int total_layers = D * L;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, P ? P : H));
Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
const index_t b_size = 2 * H * 4;
const index_t cell_size = N * H;
const index_t projection_size = (P ? P : H) * N;
DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2;
DType* y_cur_ptr = y_ptr;
int idx = 0; // state & cell state's idx;
bool flag = (L % 2 == 0);
for (int i = 0; i < L; ++i) {
const index_t input_size = i ? (P ? P : H) * D : I;
index_t w_size = (input_size + (P ? P : H)) * H * 4;
if (P > 0) {
w_size += P * H;
}
// If bidirectional, need space to save current layer output y.
if (D == 2) {
y_cur_ptr = flag ? y_tmp_ptr : y_ptr;
flag = !flag;
}
Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size));
Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, (P ? P : H) * D));
LstmForwardInferenceSingleLayer<DType>(ws,
state_outputs,
false,
T,
N,
input_size,
H,
P,
x,
hx[idx],
cx[idx],
y,
w_ptr,
b_ptr,
hy_ptr,
cy_ptr);
// If bidirectional, then calculate the reverse direction's forward result.
if (D == 2) {
w_ptr += w_size;
b_ptr += b_size;
++idx;
if (state_outputs) {
hy_ptr += projection_size;
cy_ptr += cell_size;
}
LstmForwardInferenceSingleLayer<DType>(ws,
state_outputs,
true,
T,
N,
input_size,
H,
P,
x,
hx[idx],
cx[idx],
y,
w_ptr,
b_ptr,
hy_ptr,
cy_ptr);
}
// Don't need to move pointer in the last layer.
if (i != L - 1) {
w_ptr += w_size;
b_ptr += b_size;
x_ptr = y_cur_ptr;
++idx;
if (state_outputs) {
hy_ptr += projection_size;
cy_ptr += cell_size;
}
}
}
}
template <typename DType>
void LstmBackwardSingleLayer(DType* ws,
DType* rs,
DType* tmp_buf,
bool bid,
const index_t T,
const index_t N,
const index_t I,
const int H,
const Tensor<cpu, 2, DType>& x,
const Tensor<cpu, 2, DType>& hx,
const Tensor<cpu, 2, DType>& cx,
const Tensor<cpu, 3, DType>& y,
const Tensor<cpu, 3, DType>& dy,
const Tensor<cpu, 2, DType>& dx,
const Tensor<cpu, 2, DType>& dhx,
const Tensor<cpu, 2, DType>& dcx,
DType* dhy_ptr,
DType* dcy_ptr,
DType* w_ptr,
DType* dw_ptr,
DType* db_ptr,
int req_data,
int req_params,
int req_state,
int req_statecell) {
using namespace mshadow;
const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I));
const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H));
Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I));
Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H));
Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4));
Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4));
DType* c_ptr = bid ? rs + T * N * H * 7 : rs;
const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H));
const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H * 4 * H; ++i) {
dwh.dptr_[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 4 * H; ++i) {
dbx.dptr_[i] = 0;
dbh.dptr_[i] = 0;
}
}
Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H));
Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H));
Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H));
Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H));
const int offset = bid ? H : 0;
const DType alpha = 1.0;
const DType beta0 = 0.0;
const DType beta1 = 1.0;
const DType beta2 = 2.0;
const index_t cell_size = N * H;
if (dhy_ptr != nullptr) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < cell_size; ++i) {
dh.dptr_[i] = dhy_ptr[i];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < cell_size; ++i) {
dh.dptr_[i] = 0;
}
}
if (dcy_ptr != nullptr) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < cell_size; ++i) {
dc.dptr_[i] = dcy_ptr[i];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < cell_size; ++i) {
dc.dptr_[i] = 0;
}
}
for (index_t i = T - 1; i >= 0; --i) {
index_t t = bid ? T - 1 - i : i;
index_t tnext = bid ? t + 1 : t - 1;
const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx;
const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx;
const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx;
const Tensor<cpu, 2, DType>& cnext = i ? c[i - 1] : cx;
#pragma omp parallel for num_threads(omp_threads)
for (index_t jk = 0; jk < cell_size; ++jk) {
index_t j = jk / H;
index_t k = jk % H;
DType tc = tanh(c[i][j][k]);
DType it = ifgo[i][j][k][0];
DType ft = ifgo[i][j][k][1];
DType gt = ifgo[i][j][k][2];
DType ot = ifgo[i][j][k][3];
dh[j][k] += dy[t][j][k + offset];
dc[j][k] += dh[j][k] * ot * (1 - tc * tc);
difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it);
difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft);
difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt);
difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot);
if (req_statecell != kNullOp || i > 0) {
dcnext[j][k] = dc[j][k] * ft;
}
if (i) {
htmp[j][k] = y[tnext][j][k + offset];
}
}
Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4));
if (req_state != kNullOp || i > 0) {
linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false);
}
if (req_params != kNullOp) {
if (req_params != kAddTo) {
linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false);
} else {
linalg_gemm(dyh, hnext, dwh, alpha, beta2, true, false);
// generate dwx every time step for AddTo
Tensor<cpu, 2, DType> x_t(x.dptr_ + i * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> dyx_t(difgo.dptr_ + i * N * H * 4, Shape2(N, H * 4));
linalg_gemm(dyx_t, x_t, dwx, alpha, beta2, true, false);
}
}
}
Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4));
if (req_data != kNullOp) {
linalg_gemm(dyx, wx, dx, alpha, bid ? beta1 : beta0, false, false);
}
if (req_params != kNullOp && req_params != kAddTo) {
linalg_gemm(dyx, x, dwx, alpha, beta0, true, false);
}
const index_t row = T * N;
const index_t col = H * 4;
if (req_params != kNullOp) {
if (req_params != kAddTo) {
for (index_t i = 0; i < row; ++i) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t j = 0; j < col; ++j) {
dbx[j] += dyx[i][j];
dbh[j] = dbx[j];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf, Shape2(col, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + col * T, Shape2(col, T));
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < col * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (index_t t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t j = 0; j < col; ++j) {
for (index_t i = 0; i < N; ++i) {
tmp_dbx[j][t] += dyx[t * N + i][j];
tmp_dbh[j][t] = tmp_dbx[j][t];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t j = 0; j < col; ++j) {
dbx[j] += tmp_dbx[j][t] + dbx[j];
dbh[j] += tmp_dbh[j][t] + dbh[j];
}
}
}
}
}
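// For reference, the gate gradients computed in the time loop above follow
// from the forward cell equations (with tc = tanh(c_t)):
//   dc_t    += dh_t * o_t * (1 - tc^2)
//   di_t     = dc_t * g_t * i_t * (1 - i_t)
//   df_t     = dc_t * c_{t-1} * f_t * (1 - f_t)
//   dg_t     = dc_t * i_t * (1 - g_t^2)
//   do_t     = dh_t * tc * o_t * (1 - o_t)
//   dc_{t-1} = dc_t * f_t
// The difgo buffer is then back-projected through wh and wx with GEMMs to
// produce dh_{t-1}, dx and the weight gradients.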
template <typename DType>
void LstmBackward(DType* ws,
DType* rs,
const int L,
const int D,
const index_t T,
const index_t N,
const index_t I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* cx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* dcy_ptr,
DType* dx_ptr,
DType* dhx_ptr,
DType* dcx_ptr,
DType* dw_ptr,
DType* db_ptr,
int req_data,
int req_params,
int req_state,
int req_statecell,
const float dropout) {
DType* dropout_random = rs + (L - 1) * D * T * N * H;
DType* rs2 = rs + (L - 1) * D * T * N * H;
DType* tmp_buf = ws;
DType* ws2 = tmp_buf + 8 * T * H;
const int total_layers = D * L;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H));
Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H));
const index_t b_size = 2 * H * 4;
const index_t r_size = D * T * N * H * 6;
const index_t y_offset = T * N * H * 5;
const index_t w_size1 = (I + H) * H * 4; // first layer
const index_t w_size2 = (D * H + H) * H * 4; // other layers
const index_t cell_size = N * H;
const index_t y_size = T * N * H * D;
DType* dy_tmp_ptr = ws2 + T * cell_size * 4 + cell_size * 3;
for (int i = L - 1; i >= 0; --i) {
const index_t input_size = i ? H * D : I;
const index_t w_size = i ? w_size2 : w_size1;
int idx = i * D;
DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr;
DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr;
DType* db_cur_ptr = db_ptr + i * b_size * D;
DType* rs_cur_ptr = rs2 + i * r_size;
DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : nullptr;
DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : nullptr;
Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D));
Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D));
Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size));
Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size));
LstmBackwardSingleLayer<DType>(ws2,
rs_cur_ptr,
tmp_buf,
false,
T,
N,
input_size,
H,
x,
hx[idx],
cx[idx],
y,
dy,
dx,
dhx[idx],
dcx[idx],
dhy_cur_ptr,
dcy_cur_ptr,
w_cur_ptr,
dw_cur_ptr,
db_cur_ptr,
req_data,
req_params,
req_state,
req_statecell);
if (D == 2) {
w_cur_ptr += w_size;
dw_cur_ptr += w_size;
db_cur_ptr += b_size;
++idx;
dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : nullptr;
dcy_cur_ptr = dcy_ptr ? dcy_cur_ptr + cell_size : nullptr;
LstmBackwardSingleLayer<DType>(ws2,
rs_cur_ptr,
tmp_buf,
true,
T,
N,
input_size,
H,
x,
hx[idx],
cx[idx],
y,
dy,
dx,
dhx[idx],
dcx[idx],
dhy_cur_ptr,
dcy_cur_ptr,
w_cur_ptr,
dw_cur_ptr,
db_cur_ptr,
req_data,
req_params,
req_state,
req_statecell);
// Prevent overwriting dy while calculating dx in the left-to-right layer
const int loop_iteration = (L - 1) - i;
dy_tmp_ptr = loop_iteration % 2 ? dy_tmp_ptr - y_size : dy_tmp_ptr + y_size;
}
if (dropout > 0.0f && i > 0 && req_data != kNullOp) {
dropout_random = dropout_random - T * N * D * H;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel for num_threads(omp_threads)
for (index_t j = 0; j < T * N * D * H; j++) {
if (dropout_random[j] == 0) {
dx.dptr_[j] = 0;
} else {
dx.dptr_[j] = dx.dptr_[j] / (1.0f - dropout);
}
}
}
dy_ptr = dx.dptr_;
}
}
template <typename DType>
void GruForwardInferenceSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const index_t T,
const index_t N,
const index_t I,
const int H,
const Tensor<cpu, 2, DType>& x,
const Tensor<cpu, 2, DType>& hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* y_ptr,
DType* hy_ptr) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T - 1) * N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, 3 * H]
DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H
DType* rt = gemmC2 + N * 3 * H;
DType* zt = rt + N * H;
DType* nt = zt + N * H;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_bx_ptr = (bx_ptr != nullptr) ? bx_ptr + 3 * H * 2 : nullptr;
DType* back_bh_ptr = (bh_ptr != nullptr) ? bh_ptr + 3 * H * 2 : nullptr;
DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
// x * wx.T : [T * N, I] * [I, 3 * H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (index_t t = 0; t < T; t++) {
// perform the first direction, X * wx and H * wh for each step
// ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp =
Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
gemmC1_t = gemmC1 + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
index_t rtb = i * 3 * H;
index_t ztb = i * 3 * H + H;
index_t ntb = i * 3 * H + 2 * H;
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + bx[0][j] + bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + bx[1][j] + bh[1][j]);
nt[i * H + j] =
tanh(gemmC1_t[ntb + j] + bx[2][j] + rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
ht[i * D * H + j] =
(1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * ht_1[i * D * H + j];
}
}
ht_1 = ht;
ht = ht + D * H * N;
// perform the second direction
if (D == 2) {
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp =
Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
index_t rtb = i * 3 * H;
index_t ztb = i * 3 * H + H;
index_t ntb = i * 3 * H + 2 * H;
rt[i * H + j] =
sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
zt[i * H + j] =
sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j] +
rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
back_ht[i * D * H + j] =
(1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * back_ht_1[i * D * H + j];
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
// copy last state to hy, from(N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
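// For reference, the per-step math above is the standard GRU cell:
//   r_t = sigmoid(x_t*Wx_r + h_{t-1}*Wh_r + bx_r + bh_r)
//   z_t = sigmoid(x_t*Wx_z + h_{t-1}*Wh_z + bx_z + bh_z)
//   n_t = tanh(x_t*Wx_n + bx_n + r_t * (h_{t-1}*Wh_n + bh_n))
//   h_t = (1 - z_t) * n_t + z_t * h_{t-1}
// where the x_t*Wx products for all time steps are precomputed with a single
// GEMM (gemmC1) and the h_{t-1}*Wh product is refreshed each step (gemmC2).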
template <typename DType>
void GruForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const index_t T,
const index_t N,
index_t I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr) {
DType* wx = w_ptr;
DType* wh = wx + I * H * 3;
DType* bx =
wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3) + (L - 1) * ((D + 1) * H) * H * 3 * D;
DType* bh = bx + H * 3;
DType* y_tmp = ws;
DType* y_l = x_ptr;
DType* tmp_buf = y_tmp + D * T * N * H;
DType* ws2 = y_tmp + D * T * N * H + D * H * N;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
for (int l = 0; l < L; l++) {
Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
if ((L + l) % 2) {
y_l = y_ptr;
} else {
y_l = y_tmp;
}
Tensor<cpu, 2, DType> hx_l = hx[D * l];
GruForwardInferenceSingleLayer<DType>(
ws2, tmp_buf, state_outputs, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l);
hy_l = hy_l + D * N * H;
bx_l = bx_l + 3 * H * D * 2;
bh_l = bh_l + 3 * H * D * 2;
wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * 3 * H;
}
}
template <typename DType>
void GruForwardTrainingSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const index_t T,
const index_t N,
const index_t I,
const int H,
const Tensor<cpu, 2, DType>& x,
const Tensor<cpu, 2, DType>& hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* gateR,
DType* gateZ,
DType* gateN,
DType* Mnh,
DType* y_ptr,
DType* hy_ptr) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T - 1) * N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, 3 * H]
DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H
DType* rt = gateR;
DType* zt = gateZ;
DType* nt = gateN;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_bx_ptr = (bx_ptr != nullptr) ? bx_ptr + 3 * H * 2 : nullptr;
DType* back_bh_ptr = (bh_ptr != nullptr) ? bh_ptr + 3 * H * 2 : nullptr;
DType* back_gateR = gateR + T * N * H;
DType* back_gateZ = gateZ + T * N * H;
DType* back_gateN = gateN + T * N * H;
DType* back_Mnh = Mnh + T * N * H;
DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));
// x * wx.T : [T * N, I] * [I, 3 * H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (index_t t = 0; t < T; t++) {
// perform the first direction, X * wx and H * wh for each step
// ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp =
Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
rt = gateR + t * N * H;
zt = gateZ + t * N * H;
nt = gateN + t * N * H;
gemmC1_t = gemmC1 + t * N * 3 * H;
DType* Mnht = Mnh + t * N * H;
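    // Per-element GRU gates; Mnht caches the hidden-side candidate
    // pre-activation (h_{t-1}*Whn + bhn) for reuse in the backward pass:
    //   r_t = sigmoid(x_t*Wxr + h_{t-1}*Whr + bxr + bhr)
    //   z_t = sigmoid(x_t*Wxz + h_{t-1}*Whz + bxz + bhz)
    //   n_t = tanh(x_t*Wxn + bxn + r_t * (h_{t-1}*Whn + bhn))
    //   h_t = (1 - z_t) * n_t + z_t * h_{t-1}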
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
index_t rtb = i * 3 * H;
index_t ztb = i * 3 * H + H;
index_t ntb = i * 3 * H + 2 * H;
Mnht[i * H + j] = gemmC2[ntb + j] + bh[2][j];
rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + bx[0][j] + bh[0][j]);
zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + bx[1][j] + bh[1][j]);
nt[i * H + j] =
tanh(gemmC1_t[ntb + j] + bx[2][j] + rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
ht[i * D * H + j] =
(1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * ht_1[i * D * H + j];
}
}
ht_1 = ht;
ht = ht + D * H * N;
// perform the second direction
if (D == 2) {
rt = back_gateR + (T - 1 - t) * N * H;
zt = back_gateZ + (T - 1 - t) * N * H;
nt = back_gateN + (T - 1 - t) * N * H;
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp =
Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
DType* back_Mnht = back_Mnh + (T - 1 - t) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
index_t rtb = i * 3 * H;
index_t ztb = i * 3 * H + H;
index_t ntb = i * 3 * H + 2 * H;
back_Mnht[i * H + j] = gemmC2[ntb + j] + back_bh[2][j];
rt[i * H + j] =
sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]);
zt[i * H + j] =
sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + back_bx[1][j] + back_bh[1][j]);
nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j] +
rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
back_ht[i * D * H + j] =
(1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * back_ht_1[i * D * H + j];
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
  // copy the last state into hy, from (N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
template <typename DType>
void GruForwardTraining(DType* ws,
DType* rs,
bool state_outputs,
const int L,
const int D,
const index_t T,
const index_t N,
index_t I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr,
const float dropout,
std::mt19937& rnd_engine) { // NOLINT(runtime/references)
DType* wx = w_ptr;
DType* wh = wx + I * H * 3;
DType* bx =
wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3) + (L - 1) * ((D + 1) * H) * H * 3 * D;
DType* bh = bx + H * 3;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
DType* gateR_l = rs;
DType* gateZ_l = gateR_l + L * T * D * N * H;
DType* gateN_l = gateZ_l + L * T * D * N * H;
DType* y_l = gateN_l + L * T * D * N * H;
DType* Mnh_l = y_l + L * T * N * H * D;
DType* dropout_random = Mnh_l + L * D * T * N * H;
DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
DType* ws2 = tmp_buf + D * N * H;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
DType* y_tmp = x_ptr;
for (int l = 0; l < L; l++) {
if (l != 0) {
y_tmp = y_l;
y_l = y_l + T * N * H * D;
}
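    // Inverted dropout on the inter-layer input: elements are zeroed with
    // probability `dropout` and survivors are scaled by 1 / (1 - dropout),
    // keeping the expected activation unchanged; dropout_random records the
    // mask (0 == dropped) for the backward pass.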
if (dropout > 0.0f && l > 0) {
std::uniform_real_distribution<float> distribution(0, 1);
for (index_t i = 0; i < T * N * I; i++) {
if (distribution(rnd_engine) < dropout) {
dropout_random[(l - 1) * T * N * I + i] = 0;
y_tmp[i] = 0;
} else {
dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
y_tmp[i] = y_tmp[i] / (1.0f - dropout);
}
}
}
Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
Tensor<cpu, 2, DType> hx_l = hx[D * l];
GruForwardTrainingSingleLayer<DType>(ws2,
tmp_buf,
state_outputs,
D,
T,
N,
I,
H,
x_l,
hx_l,
wx_l,
wh_l,
bx_l,
bh_l,
gateR_l,
gateZ_l,
gateN_l,
Mnh_l,
y_l,
hy_l);
gateR_l = gateR_l + T * D * N * H;
gateZ_l = gateZ_l + T * D * N * H;
gateN_l = gateN_l + T * D * N * H;
Mnh_l = Mnh_l + T * D * N * H;
hy_l = hy_l + D * N * H;
bx_l = bx_l + 3 * H * D * 2;
bh_l = bh_l + 3 * H * D * 2;
wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * 3 * H;
}
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < T * N * H * D; ++i) {
y_ptr[i] = y_l[i];
}
}
template <typename DType>
void GruBackwardSingleLayer(DType* ws,
DType* tmp_buf,
const int D,
const index_t T,
const index_t N,
const index_t I,
const int H,
const Tensor<cpu, 2, DType>& x,
const Tensor<cpu, 2, DType>& hx,
DType* wx_ptr,
DType* wh_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* gateR,
DType* gateZ,
DType* gateN,
DType* Mnh,
DType* dx,
DType* dhx,
DType* dwx,
DType* dwh,
DType* dbx,
DType* dbh,
int req_data,
int req_params,
int req_state) {
DType* dyt;
DType* ht1; // [N, D, H]
DType* rt;
DType* zt;
DType* nt;
DType* dat;
DType* dart;
DType* dar = ws; // [T, N, 3 * H]
DType* da = dar + T * N * 3 * H; // [T, N, 3 * H]
DType* dht1 = da + T * N * 3 * H; // [D, N, H]
DType* hx_ = dht1 + D * N * H; // [N, D, H]
DType* Mnht = Mnh;
DType* back_ht1;
DType* back_dht1 = dht1 + N * H; // [N, H]
DType* back_Mnht = Mnh + T * N * H;
DType* back_gateR = gateR + T * N * H;
DType* back_gateZ = gateZ + T * N * H;
DType* back_gateN = gateN + T * N * H;
DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
DType* back_dwx = dwx + I * 3 * H + H * 3 * H;
DType* back_dwh = dwh + I * 3 * H + H * 3 * H;
DType* back_dbx = dbx + 3 * H * 2;
DType* back_dbh = dbh + 3 * H * 2;
DType alpha = 1.0;
DType beta = 0.0;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H * 3 * H; ++i) {
dwh[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * 3 * H; ++i) {
dbx[i] = 0;
dbh[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N * H; ++i) {
if (dhy_ptr) {
dht1[i] = dhy_ptr[i];
} else {
dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + j] = hx[i][j];
}
}
if (D == 2) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N * H; ++i) {
if (dhy_ptr) {
back_dht1[i] = dhy_ptr[N * H + i];
} else {
back_dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + H + j] = hx[N + i][j];
}
}
}
for (index_t t = T - 1; t >= 0; --t) {
if (t) {
ht1 = y_ptr + (t - 1) * N * D * H;
} else {
ht1 = hx_;
}
    // accumulate dy[t] (layout [T, N, D, H]) into the running gradient dht1
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
dht1[i * H + j] += dyt[i * D * H + j];
}
}
rt = gateR + t * N * H;
zt = gateZ + t * N * H;
nt = gateN + t * N * H;
Mnht = Mnh + t * N * H;
dat = da + t * N * 3 * H;
dart = dar + t * N * 3 * H;
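    // Gate gradients from h_t = (1 - z_t)*n_t + z_t*h_{t-1}; da holds the
    // gradients w.r.t. the x-side pre-activations and dar those w.r.t. the
    // h-side pre-activations (they differ only in the candidate slot):
    //   dn_t = dh_t * (1 - z_t) * (1 - n_t^2)            (tanh')
    //   dz_t = dh_t * (h_{t-1} - n_t) * z_t * (1 - z_t)  (sigmoid')
    //   dr_t = dn_t * Mnh_t * r_t * (1 - r_t)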
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
int nid = i * 3 * H + 2 * H + j;
int zid = i * 3 * H + H + j;
int rid = i * 3 * H + j;
int id = i * H + j;
dat[nid] = dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
dart[zid] = dat[zid] = dht1[id] * (ht1[i * D * H + j] - nt[id]) * zt[id] * (1 - zt[id]);
dart[rid] = dat[rid] = dat[nid] * Mnht[id] * rt[id] * (1 - rt[id]);
dart[nid] = dat[nid] * rt[id];
dht1[id] = dht1[id] * zt[id];
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = dart * wh [N, H] = [N, 3 * H] * [3 * H, H]
Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
linalg_gemm(d_dat, d_xt, d_dwx, alpha, beta, true, false);
}
// dwh = dart.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(3 * H, H));
Tensor<cpu, 3, DType> d_ht1_tmp =
Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (index_t j = 0; j < N * T; ++j) {
dbx[i] += da[j * 3 * H + i];
dbh[i] += dar[j * 3 * H + i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < H * T * 3; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (index_t t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (index_t j = 0; j < N; ++j) {
tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
dbx[i] += tmp_dbx[i][t] + dbx[i];
dbh[i] += tmp_dbh[i][t] + dbh[i];
}
}
}
}
alpha = 1.0;
beta = 0.0;
// dx = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
Tensor<cpu, 2, DType> d_da(da, Shape2(T * N, 3 * H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_da, wx, d_dx, alpha, beta, false, false);
}
// dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
linalg_gemm(d_da, x, d_dwx, alpha, beta, true, false);
}
if (D == 2) {
for (index_t t = 0; t < T; ++t) {
if (t == T - 1) {
back_ht1 = hx_;
} else {
back_ht1 = y_ptr + (t + 1) * N * D * H;
}
      // accumulate dy[t] (layout [T, N, D, H]) into the running gradient back_dht1
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
back_dht1[i * H + j] += dyt[i * D * H + H + j];
}
}
rt = back_gateR + t * N * H;
zt = back_gateZ + t * N * H;
nt = back_gateN + t * N * H;
back_Mnht = Mnh + (T + t) * N * H;
dat = da + t * N * 3 * H;
dart = dar + t * N * 3 * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
index_t nid = i * 3 * H + 2 * H + j;
index_t zid = i * 3 * H + H + j;
index_t rid = i * 3 * H + j;
index_t id = i * H + j;
dat[nid] = back_dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
dart[zid] = dat[zid] =
back_dht1[id] * (back_ht1[i * D * H + H + j] - nt[id]) * zt[id] * (1 - zt[id]);
dart[rid] = dat[rid] = dat[nid] * back_Mnht[id] * rt[id] * (1 - rt[id]);
dart[nid] = dat[nid] * rt[id];
back_dht1[id] = back_dht1[id] * zt[id];
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = da * wh [N, H] = [N, 3 * H] * [3 * H, H]
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
// dwh = da.T * ht1 [3 * H, H] = [3 * H, N] * [N, H]
Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(3 * H, H));
Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
Tensor<cpu, 3, DType> d_back_ht1_tmp =
Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [3 * H, I] = [3 * H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
linalg_gemm(d_dat, d_xt, d_back_dwx, alpha, beta, true, false);
}
linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, 3 * H] = [1, N] * [N, 3 * H]
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (index_t j = 0; j < N * T; ++j) {
back_dbx[i] += da[j * 3 * H + i];
back_dbh[i] += dar[j * 3 * H + i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T, Shape2(H * 3, T));
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < H * T * 3; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (index_t t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
for (index_t j = 0; j < N; ++j) {
tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < 3 * H; ++i) {
back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
back_dbh[i] += tmp_dbh[i][t] + back_dbh[i];
}
}
}
}
alpha = 1.0;
beta = 1.0;
// dxt = da * wx [T * N, I] = [T * N, 3 * H] * [3 * H, I]
Tensor<cpu, 2, DType> d_da2(da, Shape2(T * N, 3 * H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_da2, back_wx, d_dx, alpha, beta, false, false);
}
alpha = 1.0;
beta = 0.0;
// dwx = da.T * x [3 * H, I] = [3 * H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
linalg_gemm(d_da2, x, d_back_dwx, alpha, beta, true, false);
}
}
if (req_state != kNullOp) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N * H * D; ++i) {
dhx[i] = dht1[i];
}
}
}
template <typename DType>
void GruBackward(DType* ws,
DType* rs,
const int L,
const int D,
const index_t T,
const index_t N,
index_t I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* dx_ptr,
DType* dhx_ptr,
DType* dw_ptr,
int req_data,
int req_params,
int req_state,
const float dropout) {
DType* wx = w_ptr;
DType* dwx = dw_ptr;
DType* dwh = dwx + I * H * 3;
DType* dbx =
dwh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3) + (L - 1) * ((D + 1) * H) * H * 3 * D;
DType* gateR_l = rs + (L - 1) * T * D * N * H;
DType* gateZ_l = gateR_l + L * T * D * N * H;
DType* gateN_l = gateZ_l + L * T * D * N * H;
DType* y_l = gateN_l + L * T * D * N * H;
DType* Mnh_l = y_l + L * T * N * H * D;
DType* dropout_random = Mnh_l + L * D * T * N * H;
DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
DType* dx_l = tmp_buf + T * N * D * H + 3 * H * T * 2;
DType* ws2 = dx_l + T * N * D * H;
DType* wx_l =
(L == 1) ? wx : wx + (L - 2) * D * (D + 1) * H * 3 * H + D * I * 3 * H + D * H * 3 * H;
DType* wh_l = wx_l;
if (L == 1) {
wh_l = wh_l + I * H * 3;
} else {
wh_l = wh_l + (D * H) * H * 3;
}
DType* dhy_l = nullptr;
if (dhy_ptr)
dhy_l = dhy_ptr + (L - 1) * D * N * H;
DType* dwx_l =
(L == 1) ? dwx : dwx + (L - 2) * D * (D + 1) * H * 3 * H + D * I * 3 * H + D * H * 3 * H;
DType* dwh_l = nullptr;
if (L == 1) {
dwh_l = dwx_l + I * H * 3;
} else {
dwh_l = dwx_l + (D * H) * H * 3;
}
DType* dbx_l = dbx + (L - 1) * D * 3 * H * 2;
DType* dbh_l = dbx_l + 3 * H;
DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
DType* dy_l = dy_ptr;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
index_t inputsize = I;
DType* y_tmp = y_l - T * N * H * D;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int l = L - 1; l >= 0; --l) {
if (l == 0) {
I = inputsize;
y_tmp = x_ptr;
dx_l = dx_ptr;
} else {
I = D * H;
}
Tensor<cpu, 2, DType> hx_l = hx[l];
Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
GruBackwardSingleLayer<DType>(ws2,
tmp_buf,
D,
T,
N,
I,
H,
x_l,
hx_l,
wx_l,
wh_l,
y_l,
dy_l,
dhy_l,
gateR_l,
gateZ_l,
gateN_l,
Mnh_l,
dx_l,
dhx_l,
dwx_l,
dwh_l,
dbx_l,
dbh_l,
req_data,
req_params,
req_state);
if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
dropout_random = dropout_random - T * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < T * N * I; i++) {
if (dropout_random[i] == 0) {
dx_l[i] = 0;
} else {
dx_l[i] = dx_l[i] / (1.0f - dropout);
}
}
}
if (l > 0) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < T * N * H * D; ++i) {
dy_l[i] = dx_l[i];
}
gateR_l = gateR_l - T * D * N * H;
gateZ_l = gateZ_l - T * D * N * H;
gateN_l = gateN_l - T * D * N * H;
Mnh_l = Mnh_l - T * D * N * H;
dhx_l = dhx_l - D * N * H;
if (dhy_l)
dhy_l = dhy_l - D * N * H;
y_l = y_l - T * N * H * D;
y_tmp = y_tmp - T * N * H * D;
if (l == 1) {
wx_l = wx_l - (inputsize + H) * H * 3 * D;
wh_l = wx_l + inputsize * 3 * H;
dwx_l = dwx_l - (inputsize + H) * H * 3 * D;
dwh_l = dwx_l + inputsize * 3 * H;
} else {
wx_l = wx_l - (I + H) * H * 3 * D;
wh_l = wx_l + I * 3 * H;
dwx_l = dwx_l - (I + H) * H * 3 * D;
dwh_l = dwx_l + I * 3 * H;
}
dbx_l = dbx_l - D * 3 * H * 2;
dbh_l = dbx_l + 3 * H;
}
}
}
template <typename DType>
void VanillaRNNForwardInferenceSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const index_t T,
const index_t N,
const index_t I,
const int H,
const Tensor<cpu, 2, DType>& x,
const Tensor<cpu, 2, DType>& hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* y_ptr,
DType* hy_ptr,
int mode) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T - 1) * N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, H]
DType* gemmC2 = gemmC1 + D * T * N * H; // N * H
DType* back_wx_ptr = wx_ptr + I * H + H * H;
DType* back_wh_ptr = wh_ptr + I * H + H * H;
DType* back_bx_ptr = (bx_ptr != nullptr) ? bx_ptr + H * 2 : nullptr;
DType* back_bh_ptr = (bh_ptr != nullptr) ? bh_ptr + H * 2 : nullptr;
DType* back_gemmC1 = gemmC1 + T * N * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
// x * wx.T : [T * N, I] * [I, H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (index_t t = 0; t < T; t++) {
// perform the first direction, X * wx and H * wh for each step
// ht-1 * wh, ht-1:[N, H] wh:[H, H]
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp =
Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
gemmC1_t = gemmC1 + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
index_t tb = i * H;
if (mode == 1) {
ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j]);
} else {
ht[i * D * H + j] = relu(gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j]);
}
}
}
ht_1 = ht;
ht = ht + D * H * N;
// perform the second direction
if (D == 2) {
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp =
Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
index_t tb = i * H;
if (mode == 1) {
back_ht[i * D * H + j] =
tanh(gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j]);
} else {
back_ht[i * D * H + j] =
relu(gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j]);
}
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
  // copy the last state into hy, from (N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
template <typename DType>
void VanillaRNNForwardInference(DType* ws,
bool state_outputs,
const int L,
const int D,
const index_t T,
const index_t N,
index_t I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr,
int mode) {
DType* wx = w_ptr;
DType* wh = wx + I * H;
DType* bx = wh + H * H + (D - 1) * (H * H + I * H) + (L - 1) * ((D + 1) * H) * H * D;
DType* bh = bx + H;
DType* y_tmp = ws;
DType* y_l = x_ptr;
DType* tmp_buf = y_tmp + D * T * N * H;
DType* ws2 = y_tmp + D * T * N * H + D * H * N;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
for (int l = 0; l < L; l++) {
Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
if ((L + l) % 2) {
y_l = y_ptr;
} else {
y_l = y_tmp;
}
Tensor<cpu, 2, DType> hx_l = hx[D * l];
VanillaRNNForwardInferenceSingleLayer<DType>(ws2,
tmp_buf,
state_outputs,
D,
T,
N,
I,
H,
x_l,
hx_l,
wx_l,
wh_l,
bx_l,
bh_l,
y_l,
hy_l,
mode);
hy_l = hy_l + D * N * H;
bx_l = bx_l + H * D * 2;
bh_l = bh_l + H * D * 2;
wx_l = wx_l + I * H * D + H * H * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * H;
}
}
template <typename DType>
void VanillaRNNForwardTrainingSingleLayer(DType* ws,
DType* tmp_buf,
bool state_outputs,
const int D,
const index_t T,
const index_t N,
const index_t I,
const int H,
const Tensor<cpu, 2, DType>& x,
const Tensor<cpu, 2, DType>& hx,
DType* wx_ptr,
DType* wh_ptr,
DType* bx_ptr,
DType* bh_ptr,
DType* gateN,
DType* y_ptr,
DType* hy_ptr,
int mode) {
DType* ht = y_ptr;
DType* ht_1 = y_ptr;
DType* back_ht_1 = y_ptr + (T - 1) * N * H * D + H;
DType* back_ht = back_ht_1;
DType* gemmC1 = ws; // [D, T, N, H]
DType* gemmC2 = gemmC1 + D * T * N * H; // N * H
DType* nt = gateN;
DType* back_wx_ptr = wx_ptr + I * H + H * H;
DType* back_wh_ptr = wh_ptr + I * H + H * H;
DType* back_bx_ptr = (bx_ptr != nullptr) ? bx_ptr + H * 2 : nullptr;
DType* back_bh_ptr = (bh_ptr != nullptr) ? bh_ptr + H * 2 : nullptr;
DType* back_gateN = gateN + T * N * H;
DType* back_gemmC1 = gemmC1 + T * N * H;
DType* gemmC1_t = gemmC1;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 1, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 1, H));
const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (D == 1) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * H + j] = hx[i][j];
}
} else {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
y_ptr[i * D * H + j] = hx[i][j];
back_ht_1[i * D * H + j] = hx[N + i][j];
}
}
Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));
// x * wx.T : [T * N, I] * [I, H]
DType alpha = 1.0;
DType beta = 0.0;
linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
if (D == 2) {
linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
}
for (index_t t = 0; t < T; t++) {
// perform the first direction, X * wx and H * wh for each step
// ht-1 * wh, ht-1:[N, H] wh:[H, H]
Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
if (D == 1) {
linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
} else {
Tensor<cpu, 3, DType> dht_1_tmp =
Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
}
nt = gateN + t * N * H;
gemmC1_t = gemmC1 + t * N * H;
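    // Vanilla RNN cell: h_t = act(x_t*Wx + bx + h_{t-1}*Wh + bh), where act
    // is tanh when mode == 1 and ReLU otherwise. gateN stores what the
    // backward pass needs: the activation for tanh, the pre-activation for
    // ReLU.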
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
index_t tb = i * H;
if (mode == 1) {
nt[tb + j] = ht[i * D * H + j] =
tanh(gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j]);
} else {
nt[tb + j] = gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j];
ht[i * D * H + j] = relu(nt[tb + j]);
}
}
}
ht_1 = ht;
ht = ht + D * H * N;
// perform the second direction
if (D == 2) {
nt = back_gateN + (T - 1 - t) * N * H;
gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
Tensor<cpu, 3, DType> dback_ht_1_tmp =
Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
index_t tb = i * H;
if (mode == 1) {
nt[tb + j] = back_ht[i * D * H + j] =
tanh(gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j]);
} else {
nt[tb + j] = gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j];
back_ht[i * D * H + j] = relu(nt[tb + j]);
}
}
}
back_ht_1 = back_ht;
back_ht = back_ht - D * H * N;
}
}
  // copy the last state into hy, from (N, H * D) to (D, N, H)
if (state_outputs) {
if (D == 1) {
DType* y_start = y_ptr + (T - 1) * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * H + j];
}
} else {
DType* y_start = y_ptr + (T - 1) * N * H * D;
DType* y_back_start = y_ptr + H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; i++)
for (int j = 0; j < H; j++) {
hy_ptr[i * H + j] = y_start[i * D * H + j];
hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
}
}
}
}
template <typename DType>
void VanillaRNNForwardTraining(DType* ws,
DType* rs,
bool state_outputs,
const int L,
const int D,
const index_t T,
const index_t N,
index_t I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* y_ptr,
DType* hy_ptr,
const float dropout,
int mode,
std::mt19937& rnd_engine) { // NOLINT(runtime/references)
DType* wx = w_ptr;
DType* wh = wx + I * H;
DType* bx = wh + H * H + (D - 1) * (H * H + I * H) + (L - 1) * ((D + 1) * H) * H * D;
DType* bh = bx + H;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
DType* hy_l = hy_ptr;
DType* gateN_l = rs;
DType* y_l = gateN_l + L * T * D * N * H;
DType* dropout_random = y_l + L * D * T * N * H;
DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
DType* ws2 = tmp_buf + D * N * H;
DType* wx_l = wx;
DType* wh_l = wh;
DType* bx_l = bx;
DType* bh_l = bh;
DType* y_tmp = x_ptr;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int l = 0; l < L; l++) {
if (l != 0) {
y_tmp = y_l;
y_l = y_l + T * N * H * D;
}
if (dropout > 0.0f && l > 0) {
std::uniform_real_distribution<float> distribution(0, 1);
for (index_t i = 0; i < T * N * I; i++) {
if (distribution(rnd_engine) < dropout) {
dropout_random[(l - 1) * T * N * I + i] = 0;
y_tmp[i] = 0;
} else {
dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
y_tmp[i] = y_tmp[i] / (1.0f - dropout);
}
}
}
Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
Tensor<cpu, 2, DType> hx_l = hx[D * l];
VanillaRNNForwardTrainingSingleLayer<DType>(ws2,
tmp_buf,
state_outputs,
D,
T,
N,
I,
H,
x_l,
hx_l,
wx_l,
wh_l,
bx_l,
bh_l,
gateN_l,
y_l,
hy_l,
mode);
gateN_l = gateN_l + T * D * N * H;
hy_l = hy_l + D * N * H;
bx_l = bx_l + H * D * 2;
bh_l = bh_l + H * D * 2;
wx_l = wx_l + I * H * D + H * H * D;
if (l == 0) {
I = D * H;
}
wh_l = wx_l + I * H;
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < T * N * H * D; ++i) {
y_ptr[i] = y_l[i];
}
}
template <typename DType>
void VanillaRNNBackwardSingleLayer(DType* ws,
DType* tmp_buf,
const int D,
const index_t T,
const index_t N,
const index_t I,
const int H,
const Tensor<cpu, 2, DType>& x,
const Tensor<cpu, 2, DType>& hx,
DType* wx_ptr,
DType* wh_ptr,
DType* y_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* gateN,
DType* dx,
DType* dhx,
DType* dwx,
DType* dwh,
DType* dbx,
DType* dbh,
int req_data,
int req_params,
int req_state,
int mode) {
DType* dyt;
DType* ht1; // [N, D, H]
DType* dart;
DType* nt;
DType* dar = ws; // [T, N, H]
DType* dht1 = dar + T * N * H; // [D, N, H]
DType* hx_ = dht1 + D * N * H; // [N, D, H]
DType* back_ht1;
DType* back_dht1 = dht1 + N * H; // [N, H]
DType* back_gateN = gateN + T * N * H;
DType* back_wx_ptr = wx_ptr + I * H + H * H;
DType* back_wh_ptr = wh_ptr + I * H + H * H;
DType* back_dwx = dwx + I * H + H * H;
DType* back_dwh = dwh + I * H + H * H;
DType* back_dbx = dbx + H * 2;
DType* back_dbh = dbh + H * 2;
DType alpha = 1.0;
DType beta = 0.0;
const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
if (req_params != kNullOp && req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H * H; ++i) {
dwh[i] = 0;
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < D * H; ++i) {
dbx[i] = 0;
dbh[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N * H; ++i) {
if (dhy_ptr) {
dht1[i] = dhy_ptr[i];
} else {
dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + j] = hx[i][j];
}
}
if (D == 2) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N * H; ++i) {
if (dhy_ptr) {
back_dht1[i] = dhy_ptr[N * H + i];
} else {
back_dht1[i] = 0;
}
}
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
hx_[i * D * H + H + j] = hx[N + i][j];
}
}
}
for (index_t t = T - 1; t >= 0; --t) {
if (t) {
ht1 = y_ptr + (t - 1) * N * D * H;
} else {
ht1 = hx_;
}
    // accumulate dy[t] (layout [T, N, D, H]) into the running gradient dht1
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
dht1[i * H + j] += dyt[i * D * H + j];
}
}
nt = gateN + t * N * H;
dart = dar + t * N * H;
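    // d(pre-activation) = dh_t * act'(.): for tanh (mode == 1) gateN holds
    // the output, so act' = 1 - n_t^2; for ReLU gateN holds the
    // pre-activation, so act' is 1 where it is positive and 0 elsewhere.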
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
index_t id = i * H + j;
if (mode == 1) {
dart[id] = dht1[id] * (1 - nt[id] * nt[id]);
} else {
dart[id] = nt[id] > 0.0f ? static_cast<float>(dht1[id]) : 0.0f;
}
dht1[id] = 0;
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = dart * wh [N, H] = [N, H] * [H, H]
Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [H, I] = [H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
linalg_gemm(d_dart, d_xt, d_dwx, alpha, beta, true, false);
}
// dwh = dart.T * ht1 [H, H] = [H, N] * [N, H]
Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(H, H));
Tensor<cpu, 3, DType> d_ht1_tmp =
Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, H] = [1, N] * [N, H]
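      // bx and bh enter the vanilla RNN pre-activation as a plain sum, so
      // their gradients coincide and dbh simply mirrors dbx.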
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (index_t j = 0; j < N * T; ++j) {
dbx[i] += dar[j * H + i];
dbh[i] = dbx[i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < H * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (index_t t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (index_t j = 0; j < N; ++j) {
tmp_dbx[i][t] += dar[t * N * H + j * H + i];
tmp_dbh[i][t] = tmp_dbx[i][t];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
dbx[i] += tmp_dbx[i][t] + dbx[i];
dbh[i] = dbx[i];
}
}
}
}
alpha = 1.0;
beta = 0.0;
// dx = da * wx [T * N, I] = [T * N, H] * [H, I]
Tensor<cpu, 2, DType> d_dar(dar, Shape2(T * N, H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_dar, wx, d_dx, alpha, beta, false, false);
}
// dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I));
linalg_gemm(d_dar, x, d_dwx, alpha, beta, true, false);
}
if (D == 2) {
for (index_t t = 0; t < T; ++t) {
if (t == T - 1) {
back_ht1 = hx_;
} else {
back_ht1 = y_ptr + (t + 1) * N * D * H;
}
      // accumulate dy[t] (layout [T, N, D, H]) into the running gradient back_dht1
dyt = dy_ptr + t * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
back_dht1[i * H + j] += dyt[i * D * H + H + j];
}
}
nt = back_gateN + t * N * H;
dart = dar + t * N * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N; ++i) {
for (int j = 0; j < H; ++j) {
index_t id = i * H + j;
if (mode == 1) {
dart[id] = back_dht1[id] * (1 - nt[id] * nt[id]);
} else {
dart[id] = nt[id] > 0.0f ? static_cast<float>(back_dht1[id]) : 0.0f;
}
back_dht1[id] = 0;
}
}
if (req_params != kNullOp) {
alpha = 1.0;
beta = 1.0;
// dht1 = da * wh [N, H] = [N, H] * [H, H]
Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H));
Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
// dwh = da.T * ht1 [H, H] = [H, N] * [N, H]
Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(H, H));
Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
Tensor<cpu, 3, DType> d_back_ht1_tmp =
Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
if (req_params == kAddTo) {
beta = 2.0;
// dwx = da.T * x [ H, I] = [H, N] * [N, I] for AddTo
Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
linalg_gemm(d_dart, d_xt, d_back_dwx, alpha, beta, true, false);
}
linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
}
}
if (req_params != kNullOp) {
// dbx = e * da [1, H] = [1, N] * [N, H]
if (req_params != kAddTo) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (index_t j = 0; j < N * T; ++j) {
back_dbx[i] += dar[j * H + i];
back_dbh[i] = back_dbx[i];
}
}
} else {
const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T));
const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T));
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < H * T; ++i) {
tmp_dbx.dptr_[i] = 0;
tmp_dbh.dptr_[i] = 0;
}
for (index_t t = T - 1; t >= 0; --t) {
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
for (index_t j = 0; j < N; ++j) {
tmp_dbx[i][t] += dar[t * N * H + j * H + i];
tmp_dbh[i][t] = tmp_dbx[i][t];
}
}
#pragma omp parallel for num_threads(omp_threads)
for (int i = 0; i < H; ++i) {
back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
back_dbh[i] = back_dbx[i];
}
}
}
}
alpha = 1.0;
beta = 1.0;
// dxt = da * wx [T * N, I] = [T * N, H] * [H, I]
Tensor<cpu, 2, DType> d_dar2(dar, Shape2(T * N, H));
if (req_data != kNullOp) {
Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
linalg_gemm(d_dar2, back_wx, d_dx, alpha, beta, false, false);
}
alpha = 1.0;
beta = 0.0;
// dwx = da.T * x [H, I] = [H, T * N] * [T * N, I]
if (req_params != kNullOp && req_params != kAddTo) {
Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I));
linalg_gemm(d_dar2, x, d_back_dwx, alpha, beta, true, false);
}
}
if (req_state != kNullOp) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < N * H * D; ++i) {
dhx[i] = dht1[i];
}
}
}
template <typename DType>
void VanillaRNNBackward(DType* ws,
DType* rs,
const int L,
const int D,
const index_t T,
const index_t N,
index_t I,
const int H,
DType* x_ptr,
DType* hx_ptr,
DType* w_ptr,
DType* dy_ptr,
DType* dhy_ptr,
DType* dx_ptr,
DType* dhx_ptr,
DType* dw_ptr,
int req_data,
int req_params,
int req_state,
const float dropout,
int mode) {
DType* wx = w_ptr;
DType* dwx = dw_ptr;
DType* dwh = dwx + I * H;
DType* dbx = dwh + H * H + (D - 1) * (H * H + I * H) + (L - 1) * ((D + 1) * H) * H * D;
DType* gateN_l = rs + (L - 1) * T * D * N * H;
DType* y_l = gateN_l + L * T * D * N * H;
DType* dropout_random = y_l + L * D * T * N * H;
DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
DType* dx_l = tmp_buf + T * N * D * H + H * T * 2;
DType* ws2 = dx_l + T * N * D * H;
DType* wx_l = (L == 1) ? wx : wx + (L - 2) * D * (D + 1) * H * H + D * I * H + D * H * H;
DType* wh_l = wx_l;
if (L == 1) {
wh_l = wh_l + I * H;
} else {
wh_l = wh_l + (D * H) * H;
}
DType* dhy_l = nullptr;
if (dhy_ptr)
dhy_l = dhy_ptr + (L - 1) * D * N * H;
DType* dwx_l = (L == 1) ? dwx : dwx + (L - 2) * D * (D + 1) * H * H + D * I * H + D * H * H;
DType* dwh_l = nullptr;
if (L == 1) {
dwh_l = dwx_l + I * H;
} else {
dwh_l = dwx_l + (D * H) * H;
}
DType* dbx_l = dbx + (L - 1) * D * H * 2;
DType* dbh_l = dbx_l + H;
DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
DType* dy_l = dy_ptr;
Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
index_t inputsize = I;
DType* y_tmp = y_l - T * N * H * D;
const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
for (int l = L - 1; l >= 0; --l) {
if (l == 0) {
I = inputsize;
y_tmp = x_ptr;
dx_l = dx_ptr;
} else {
I = D * H;
}
Tensor<cpu, 2, DType> hx_l = hx[l];
Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
VanillaRNNBackwardSingleLayer<DType>(ws2,
tmp_buf,
D,
T,
N,
I,
H,
x_l,
hx_l,
wx_l,
wh_l,
y_l,
dy_l,
dhy_l,
gateN_l,
dx_l,
dhx_l,
dwx_l,
dwh_l,
dbx_l,
dbh_l,
req_data,
req_params,
req_state,
mode);
if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
dropout_random = dropout_random - T * N * D * H;
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < T * N * I; i++) {
if (dropout_random[i] == 0) {
dx_l[i] = 0;
} else {
dx_l[i] = dx_l[i] / (1.0f - dropout);
}
}
}
if (l > 0) {
#pragma omp parallel for num_threads(omp_threads)
for (index_t i = 0; i < T * N * H * D; ++i) {
dy_l[i] = dx_l[i];
}
gateN_l = gateN_l - T * D * N * H;
dhx_l = dhx_l - D * N * H;
if (dhy_l)
dhy_l = dhy_l - D * N * H;
y_l = y_l - T * N * H * D;
y_tmp = y_l;
if (l == 1) {
wx_l = wx_l - (inputsize + H) * H * D;
wh_l = wx_l + inputsize * H;
dwx_l = dwx_l - (inputsize + H) * H * D;
dwh_l = dwx_l + inputsize * H;
} else {
wx_l = wx_l - (I + H) * H * D;
wh_l = wx_l + I * H;
dwx_l = dwx_l - (I + H) * H * D;
dwh_l = dwx_l + I * H;
}
dbx_l = dbx_l - D * H * 2;
dbh_l = dbx_l + H;
}
}
}
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_RNN_IMPL_H_
|
parallel_execution_omp.h | /*
* Copyright 2018 Universidad Carlos III de Madrid
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GRPPI_OMP_PARALLEL_EXECUTION_OMP_H
#define GRPPI_OMP_PARALLEL_EXECUTION_OMP_H
#ifdef GRPPI_OMP
#include "../common/mpmc_queue.h"
#include "../common/iterator.h"
#include "../common/execution_traits.h"
#include "../common/configuration.h"
#include "grppi/seq/sequential_execution.h"
#include <type_traits>
#include <tuple>
#include <omp.h>
namespace grppi {
/**
* \brief OpenMP parallel execution policy.
*
* This policy uses OpenMP as implementation back-end.
*
* \note On gcc, the implementation will not work properly when the
* concurrency degree is lower than the number of tasks needed in a pipeline.
 * This is a known issue because gcc does not run the scheduler after
 * calling taskyield(). This does not happen with the clang or Intel
 * compilers.
*/
class parallel_execution_omp {
public:
/**
\brief Default construct an OpenMP parallel execution policy.
Creates an OpenMP parallel execution object.
The concurrency degree is determined by the platform according to OpenMP
rules.
*/
parallel_execution_omp() noexcept {}
/**
\brief Constructs an OpenMP parallel execution policy.
Creates an OpenMP parallel execution object selecting the concurrency degree.
\param concurrency_degree Number of threads used for parallel algorithms.
*/
parallel_execution_omp(int concurrency_degree) noexcept
:
concurrency_degree_{concurrency_degree}
{
omp_set_num_threads(concurrency_degree_);
}
/**
\brief Constructs an OpenMP parallel execution policy.
Creates an OpenMP parallel execution object selecting the concurrency degree
and ordering.
\param concurrency_degree Number of threads used for parallel algorithms.
\param order Whether ordered execution is enabled or disabled.
*/
parallel_execution_omp(int concurrency_degree, bool order) noexcept
:
concurrency_degree_{concurrency_degree},
ordering_{order}
{
omp_set_num_threads(concurrency_degree_);
}
/**
\brief Set number of grppi threads.
*/
void set_concurrency_degree(int degree) noexcept
{
concurrency_degree_ = degree;
omp_set_num_threads(concurrency_degree_);
}
/**
\brief Get number of grppi threads.
*/
int concurrency_degree() const noexcept
{
return concurrency_degree_;
}
/**
\brief Enable ordering.
*/
void enable_ordering() noexcept { ordering_ = true; }
/**
\brief Disable ordering.
*/
void disable_ordering() noexcept { ordering_ = false; }
/**
\brief Is execution ordered.
*/
bool is_ordered() const noexcept { return ordering_; }
/**
\brief Sets the attributes for the queues built through make_queue<T>().
*/
void set_queue_attributes(int size, queue_mode mode) noexcept
{
queue_size_ = size;
queue_mode_ = mode;
}
/**
\brief Makes a communication queue for elements of type T.
Constructs a queue using the attributes that can be set via
set_queue_attributes(). The value is returned via move semantics.
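\par Example
Usage sketch (illustrative only; `ex` names a parallel_execution_omp object
and the attribute values are assumptions of this example):
\code
ex.set_queue_attributes(100, queue_mode::blocking);
auto q = ex.make_queue<int>();  // mpmc_queue<int> with the attributes above
\endcode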
*/
template<typename T>
mpmc_queue <T> make_queue() const
{
return {queue_size_, queue_mode_};
}
/**
\brief Returns a reference to a communication queue for elements of type T
when the queue has already been created in an outer pattern.
Returns a reference to the queue received as argument.
\tparam T Element type for the queue.
\tparam Transformers List of the next transformers.
\param queue Reference of a queue of type T
*/
template<typename T, typename ... Transformers>
mpmc_queue <T> &
get_output_queue(mpmc_queue <T> & queue, Transformers && ...) const
{
return queue;
}
/**
\brief Makes a communication queue for elements of type T
if the queue has not been created in an outer pattern.
Calls make_queue() and returns the resulting queue by value.
\tparam T Element type for the queue.
\tparam Transformers List of the next transformers.
*/
template<typename T, typename ... Transformers>
mpmc_queue <T> get_output_queue(Transformers && ...) const
{
return make_queue<T>();  // return by value; std::move here would inhibit copy elision
}
/**
\brief Get index of current thread in the thread table
*/
[[deprecated("Thread ids are deprecated.\n"
"If you have a specific use case file a bug")]]
int get_thread_id() const noexcept
{
int result;
#pragma omp parallel
{
result = omp_get_thread_num();
}
return result;
}
/**
\brief Applies a transformation to multiple sequences leaving the result in
another sequence using available OpenMP parallelism
\tparam InputIterators Iterator types for input sequences.
\tparam OutputIterator Iterator type for the output sequence.
\tparam Transformer Callable object type for the transformation.
\param firsts Tuple of iterators to input sequences.
\param first_out Iterator to the output sequence.
\param sequence_size Size of the input sequences.
\param transform_op Transformation callable object.
\pre For every I iterators in the range
`[get<I>(firsts), next(get<I>(firsts),sequence_size))` are valid.
\pre Iterators in the range `[first_out, next(first_out,sequence_size)]` are valid.
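\par Example
Usage sketch (illustrative only; `ex` names a parallel_execution_omp object
and the containers are assumptions of this example):
\code
std::vector<int> a(100, 1), b(100, 2), r(100);
ex.map(std::make_tuple(a.begin(), b.begin()), r.begin(), a.size(),
       [](int x, int y) { return x + y; });  // every r[i] becomes 3
\endcode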
*/
template<typename ... InputIterators, typename OutputIterator,
typename Transformer>
void map(std::tuple<InputIterators...> firsts,
OutputIterator first_out,
std::size_t sequence_size, Transformer transform_op) const;
/**
\brief Applies a reduction to a sequence of data items.
\tparam InputIterator Iterator type for the input sequence.
\tparam Identity Type for the identity value.
\tparam Combiner Callable object type for the combination.
\param first Iterator to the first element of the sequence.
\param sequence_size Size of the input sequence.
\param identity Identity value for the reduction.
\param combine_op Combination callable object.
\pre Iterators in the range `[first, next(first,sequence_size))` are valid.
\return The reduction result
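\par Example
Usage sketch (illustrative; the container and values are assumptions of
this example):
\code
std::vector<int> v(10, 1);
auto total = ex.reduce(v.begin(), v.size(), 0,
                       [](int a, int b) { return a + b; });  // total == 10
\endcode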
*/
template<typename InputIterator, typename Identity, typename Combiner>
auto reduce(InputIterator first, std::size_t sequence_size,
Identity && identity, Combiner && combine_op) const;
/**
\brief Applies a map/reduce operation to a sequence of data items.
\tparam InputIterators Iterator types for the input sequences.
\tparam Identity Type for the identity value.
\tparam Transformer Callable object type for the transformation.
\tparam Combiner Callable object type for the combination.
\param firsts Tuple of iterators to the input sequences.
\param sequence_size Size of the input sequences.
\param identity Identity value for the reduction.
\param transform_op Transformation callable object.
\param combine_op Combination callable object.
\pre For every I, iterators in the range
`[get<I>(firsts), next(get<I>(firsts),sequence_size))` are valid.
\return The map/reduce result.
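\par Example
Usage sketch (illustrative; the container and values are assumptions of
this example):
\code
std::vector<double> v{1.0, 2.0, 3.0};
auto sum_sq = ex.map_reduce(std::make_tuple(v.begin()), v.size(), 0.0,
    [](double x) { return x * x; },
    [](double a, double b) { return a + b; });  // 1 + 4 + 9 == 14
\endcode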
*/
template<typename ... InputIterators, typename Identity,
typename Transformer, typename Combiner>
auto map_reduce(std::tuple<InputIterators...> firsts,
std::size_t sequence_size,
Identity && identity,
Transformer && transform_op, Combiner && combine_op) const;
/**
\brief Applies a stencil to multiple sequences leaving the result in
another sequence.
\tparam InputIterators Iterator types for input sequences.
\tparam OutputIterator Iterator type for the output sequence.
\tparam StencilTransformer Callable object type for the stencil transformation.
\tparam Neighbourhood Callable object for generating neighbourhoods.
\param firsts Tuple of iterators to input sequences.
\param first_out Iterator to the output sequence.
\param sequence_size Size of the input sequences.
\param transform_op Stencil transformation callable object.
\param neighbour_op Neighbourhood callable object.
\pre For every I iterators in the range
`[get<I>(firsts), next(get<I>(firsts),sequence_size))` are valid.
\pre Iterators in the range `[first_out, next(first_out,sequence_size)]` are valid.
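\par Example
Usage sketch (illustrative and hedged: it assumes the single-sequence
calling convention in which transform_op receives the current iterator plus
the value produced by neighbour_op for that iterator):
\code
std::vector<double> v(100, 1.0), out(100);
ex.stencil(std::make_tuple(v.begin()), out.begin(), v.size(),
    [](auto it, double left) { return *it + left; },          // centre + neighbourhood
    [&v](auto it) { return (it == v.begin()) ? 0.0 : *std::prev(it); });
\endcode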
*/
template<typename ... InputIterators, typename OutputIterator,
typename StencilTransformer, typename Neighbourhood>
void stencil(std::tuple<InputIterators...> firsts, OutputIterator first_out,
std::size_t sequence_size,
StencilTransformer && transform_op,
Neighbourhood && neighbour_op) const;
/**
\brief Invoke \ref md_divide-conquer.
\tparam Input Type used for the input problem.
\tparam Divider Callable type for the divider operation.
\tparam Solver Callable type for the solver operation.
\tparam Combiner Callable type for the combiner operation.
\param input Input problem to be solved.
\param divide_op Divider operation.
\param solve_op Solver operation.
\param combine_op Combiner operation.
*/
template<typename Input, typename Divider, typename Solver, typename Combiner>
[[deprecated("Use new interface with predicate argument")]]
auto divide_conquer(Input && input,
Divider && divide_op,
Solver && solve_op,
Combiner && combine_op) const;
/**
\brief Invoke \ref md_divide-conquer.
\tparam Input Type used for the input problem.
\tparam Divider Callable type for the divider operation.
\tparam Predicate Callable type for the stop condition predicate.
\tparam Solver Callable type for the solver operation.
\tparam Combiner Callable type for the combiner operation.
\param input Input problem to be solved.
\param divide_op Divider operation.
\param predicate_op Predicate operation.
\param solve_op Solver operation.
\param combine_op Combiner operation.
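\par Example
Usage sketch (illustrative; it assumes the predicate returns true for base
cases that solve_op handles directly, and that <vector> and <numeric> are
included):
\code
auto sum = ex.divide_conquer(std::vector<int>(1024, 1),
    [](auto & v) {  // divider: split into two halves
      std::vector<std::vector<int>> sub;
      sub.emplace_back(v.begin(), v.begin() + v.size() / 2);
      sub.emplace_back(v.begin() + v.size() / 2, v.end());
      return sub;
    },
    [](auto & v) { return v.size() <= 64; },  // base case reached?
    [](auto & v) { return std::accumulate(v.begin(), v.end(), 0); },
    [](int a, int b) { return a + b; });  // sum == 1024
\endcode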
*/
template<typename Input, typename Divider, typename Predicate, typename Solver, typename Combiner>
auto divide_conquer(Input && input,
Divider && divide_op,
Predicate && predicate_op,
Solver && solve_op,
Combiner && combine_op) const;
/**
\brief Invoke \ref md_pipeline.
\tparam Generator Callable type for the generator operation.
\tparam Transformers Callable types for the transformers in the pipeline.
\param generate_op Generator operation.
\param transform_ops Transformer operations.
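\par Example
Usage sketch (illustrative; it assumes a std::optional-returning generator,
as in the GrPPI samples, where an empty optional ends the stream, and that
<iostream> and <optional> are included):
\code
int n = 0;
ex.pipeline(
    [&n]() -> std::optional<int> {
      if (n < 10) return n++;
      return {};
    },
    [](int x) { return x * x; },
    [](int x) { std::cout << x << '\n'; });
\endcode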
*/
template<typename Generator, typename ... Transformers>
void pipeline(Generator && generate_op,
Transformers && ... transform_op) const;
/**
\brief Invoke \ref md_pipeline coming from another context
that uses mpmc_queues as communication channels.
\tparam InputType Type of the input stream.
\tparam Transformers Callable types for the transformers in the pipeline.
\tparam OutputType Type of the output stream.
\param input_queue Input stream communicator.
\param transform_ops Transformer operations.
\param output_queue Output stream communicator.
*/
template<typename InputType, typename Transformer, typename OutputType>
void
pipeline(mpmc_queue <InputType> & input_queue, Transformer && transform_op,
mpmc_queue <OutputType> & output_queue) const
{
do_pipeline(input_queue, std::forward<Transformer>(transform_op),
output_queue);
}
/**
\brief Invoke \ref md_stream_pool.
\tparam Population Type for the initial population.
\tparam Selection Callable type for the selection operation.
\tparam Evolution Callable type for the evolution operation.
\tparam Evaluation Callable type for the evaluation operation.
\tparam Predicate Callable type for the termination operation.
\param population initial population.
\param selection_op Selection operation.
\param evolution_op Evolution operations.
\param eval_op Evaluation operation.
\param termination_op Termination operation.
*/
template<typename Population, typename Selection, typename Evolution,
typename Evaluation, typename Predicate>
void stream_pool(Population & population,
Selection && selection_op,
Evolution && evolve_op,
Evaluation && eval_op,
Predicate && termination_op) const;
private:
template<typename Input, typename Divider, typename Solver, typename Combiner>
auto divide_conquer(Input && input,
Divider && divide_op,
Solver && solve_op,
Combiner && combine_op,
std::atomic<int> & num_threads) const;
template<typename Input, typename Divider, typename Predicate, typename Solver, typename Combiner>
auto divide_conquer(Input && input,
Divider && divide_op,
Predicate && predicate_op,
Solver && solve_op,
Combiner && combine_op,
std::atomic<int> & num_threads) const;
template<typename Queue, typename Consumer,
requires_no_pattern <Consumer> = 0>
void do_pipeline(Queue & input_queue, Consumer && consume_op) const;
template<typename Inqueue, typename Transformer, typename output_type,
requires_no_pattern <Transformer> = 0>
void do_pipeline(Inqueue & input_queue, Transformer && transform_op,
mpmc_queue <output_type> & output_queue) const;
template<typename T, typename ... Others>
void do_pipeline(mpmc_queue <T> & in_q, mpmc_queue <T> & same_queue,
Others && ... ops) const;
template<typename T>
void do_pipeline(mpmc_queue <T> &) const {}
template<typename Queue, typename Transformer, typename ... OtherTransformers,
requires_no_pattern <Transformer> = 0>
void do_pipeline(Queue & input_queue, Transformer && transform_op,
OtherTransformers && ... other_ops) const;
template<typename Queue, typename Execution, typename Transformer,
template<typename, typename> class Context,
typename ... OtherTransformers,
requires_context <Context<Execution, Transformer>> = 0>
void do_pipeline(Queue & input_queue,
Context<Execution, Transformer> && context_op,
OtherTransformers && ... other_ops) const;
template<typename Queue, typename Execution, typename Transformer,
template<typename, typename> class Context,
typename ... OtherTransformers,
requires_context <Context<Execution, Transformer>> = 0>
void do_pipeline(Queue & input_queue,
Context<Execution, Transformer> & context_op,
OtherTransformers && ... other_ops) const
{
do_pipeline(input_queue, std::move(context_op),
std::forward<OtherTransformers>(other_ops)...);
}
template<typename Queue, typename FarmTransformer,
template<typename> class Farm,
requires_farm <Farm<FarmTransformer>> = 0>
void do_pipeline(Queue & input_queue,
Farm<FarmTransformer> & farm_obj) const
{
do_pipeline(input_queue, std::move(farm_obj));
}
template<typename Queue, typename FarmTransformer,
template<typename> class Farm,
requires_farm <Farm<FarmTransformer>> = 0>
void do_pipeline(Queue & input_queue,
Farm<FarmTransformer> && farm_obj) const;
template<typename Queue, typename FarmTransformer,
template<typename> class Farm,
typename ... OtherTransformers,
requires_farm <Farm<FarmTransformer>> = 0>
void do_pipeline(Queue & input_queue,
Farm<FarmTransformer> & farm_obj,
OtherTransformers && ... other_transform_ops) const
{
do_pipeline(input_queue, std::move(farm_obj),
std::forward<OtherTransformers>(other_transform_ops)...);
}
template<typename Queue, typename FarmTransformer,
template<typename> class Farm,
typename ... OtherTransformers,
requires_farm <Farm<FarmTransformer>> = 0>
void do_pipeline(Queue & input_queue,
Farm<FarmTransformer> && farm_obj,
OtherTransformers && ... other_transform_ops) const;
template<typename Queue, typename Predicate,
template<typename> class Filter,
requires_filter <Filter<Predicate>> = 0>
void do_pipeline(Queue & input_queue,
Filter<Predicate> & filter_obj) const
{
do_pipeline(input_queue, std::move(filter_obj));
}
template<typename Queue, typename Predicate,
template<typename> class Filter,
requires_filter <Filter<Predicate>> = 0>
void do_pipeline(Queue & input_queue,
Filter<Predicate> && filter_obj) const;
template<typename Queue, typename Predicate,
template<typename> class Filter,
typename ... OtherTransformers,
requires_filter <Filter<Predicate>> = 0>
void do_pipeline(Queue & input_queue,
Filter<Predicate> & filter_obj,
OtherTransformers && ... other_transform_ops) const
{
do_pipeline(input_queue, std::move(filter_obj),
std::forward<OtherTransformers>(other_transform_ops)...);
}
template<typename Queue, typename Predicate,
template<typename> class Filter,
typename ... OtherTransformers,
requires_filter <Filter<Predicate>> = 0>
void do_pipeline(Queue & input_queue,
Filter<Predicate> && filter_obj,
OtherTransformers && ... other_transform_ops) const;
template<typename Queue, typename Combiner, typename Identity,
template<typename C, typename I> class Reduce,
typename ... OtherTransformers,
requires_reduce <Reduce<Combiner, Identity>> = 0>
void
do_pipeline(Queue && input_queue, Reduce<Combiner, Identity> & reduce_obj,
OtherTransformers && ... other_transform_ops) const
{
do_pipeline(input_queue, std::move(reduce_obj),
std::forward<OtherTransformers>(other_transform_ops)...);
}
template<typename Queue, typename Combiner, typename Identity,
template<typename C, typename I> class Reduce,
typename ... OtherTransformers,
requires_reduce <Reduce<Combiner, Identity>> = 0>
void
do_pipeline(Queue && input_queue, Reduce<Combiner, Identity> && reduce_obj,
OtherTransformers && ... other_transform_ops) const;
template<typename Queue, typename Transformer, typename Predicate,
template<typename T, typename P> class Iteration,
typename ... OtherTransformers,
requires_iteration <Iteration<Transformer, Predicate>> = 0,
requires_no_pattern <Transformer> = 0>
void do_pipeline(Queue & input_queue,
Iteration<Transformer, Predicate> & iteration_obj,
OtherTransformers && ... other_transform_ops) const
{
do_pipeline(input_queue, std::move(iteration_obj),
std::forward<OtherTransformers>(other_transform_ops)...);
}
template<typename Queue, typename Transformer, typename Predicate,
template<typename T, typename P> class Iteration,
typename ... OtherTransformers,
requires_iteration <Iteration<Transformer, Predicate>> = 0,
requires_no_pattern <Transformer> = 0>
void do_pipeline(Queue & input_queue,
Iteration<Transformer, Predicate> && iteration_obj,
OtherTransformers && ... other_transform_ops) const;
template<typename Queue, typename Transformer, typename Predicate,
template<typename T, typename P> class Iteration,
typename ... OtherTransformers,
requires_iteration <Iteration<Transformer, Predicate>> = 0,
requires_pipeline <Transformer> = 0>
void do_pipeline(Queue & input_queue,
Iteration<Transformer, Predicate> && iteration_obj,
OtherTransformers && ... other_transform_ops) const;
template<typename Queue, typename ... Transformers,
template<typename...> class Pipeline,
typename ... OtherTransformers,
requires_pipeline <Pipeline<Transformers...>> = 0>
void do_pipeline(Queue & input_queue,
Pipeline<Transformers...> & pipeline_obj,
OtherTransformers && ... other_transform_ops) const
{
do_pipeline(input_queue, std::move(pipeline_obj),
std::forward<OtherTransformers>(other_transform_ops)...);
}
template<typename Queue, typename ... Transformers,
template<typename...> class Pipeline,
typename ... OtherTransformers,
requires_pipeline <Pipeline<Transformers...>> = 0>
void do_pipeline(Queue & input_queue,
Pipeline<Transformers...> && pipeline_obj,
OtherTransformers && ... other_transform_ops) const;
template<typename Queue, typename ... Transformers,
std::size_t ... I>
void do_pipeline_nested(
Queue & input_queue,
std::tuple<Transformers...> && transform_ops,
std::index_sequence<I...>) const;
private:
/**
\brief Obtain OpenMP platform number of threads.
Queries the current OpenMP number of threads so that it can be used in
initialization of data members.
\return The current OpenMP number of threads.
\note The determination is performed inside a parallel region.
*/
static int impl_concurrency_degree()
{
int result;
#pragma omp parallel
{
result = omp_get_num_threads();
}
return result;
}
private:
configuration<> config_{};
int concurrency_degree_ = config_.concurrency_degree();
bool ordering_ = config_.ordering();
int queue_size_ = config_.queue_size();
queue_mode queue_mode_ = config_.mode();
};
/**
\brief Metafunction that determines if type E is parallel_execution_omp
\tparam E Execution policy type.
*/
template<typename E>
constexpr bool is_parallel_execution_omp()
{
return std::is_same<E, parallel_execution_omp>::value;
}
/**
\brief Determines if an execution policy is supported in the current compilation.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template<>
constexpr bool is_supported<parallel_execution_omp>() { return true; }
/**
\brief Determines if an execution policy supports the map pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template<>
constexpr bool supports_map<parallel_execution_omp>() { return true; }
/**
\brief Determines if an execution policy supports the reduce pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template<>
constexpr bool supports_reduce<parallel_execution_omp>() { return true; }
/**
\brief Determines if an execution policy supports the map-reduce pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template<>
constexpr bool supports_map_reduce<parallel_execution_omp>() { return true; }
/**
\brief Determines if an execution policy supports the stencil pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template<>
constexpr bool supports_stencil<parallel_execution_omp>() { return true; }
/**
\brief Determines if an execution policy supports the divide/conquer pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template<>
constexpr bool
supports_divide_conquer<parallel_execution_omp>() { return true; }
/**
\brief Determines if an execution policy supports the pipeline pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template<>
constexpr bool supports_pipeline<parallel_execution_omp>() { return true; }
/**
\brief Determines if an execution policy supports the stream pool pattern.
\note Specialization for parallel_execution_omp when GRPPI_OMP is enabled.
*/
template<>
constexpr bool supports_stream_pool<parallel_execution_omp>() { return true; }
template<typename ... InputIterators, typename OutputIterator,
typename Transformer>
void parallel_execution_omp::map(
std::tuple<InputIterators...> firsts,
OutputIterator first_out,
std::size_t sequence_size, Transformer transform_op) const
{
#pragma omp parallel for
for (std::size_t i = 0; i < sequence_size; ++i) {
first_out[i] = apply_iterators_indexed(transform_op, firsts, i);
}
}
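// Usage sketch (not part of the original header): element-wise doubling with
// the OpenMP policy; the tuple of input iterators follows the signature above.
//   parallel_execution_omp ex;
//   std::vector<int> in(n), out(n);
//   ex.map(std::make_tuple(in.begin()), out.begin(), n,
//          [](int x) { return 2 * x; });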
template<typename InputIterator, typename Identity, typename Combiner>
auto parallel_execution_omp::reduce(
InputIterator first, std::size_t sequence_size,
Identity && identity,
Combiner && combine_op) const
{
constexpr sequential_execution seq;
using result_type = std::decay_t<Identity>;
std::vector<result_type> partial_results(concurrency_degree_);
auto process_chunk = [&](InputIterator f, std::size_t sz, std::size_t id) {
partial_results[id] = seq.reduce(f, sz, std::forward<Identity>(identity),
std::forward<Combiner>(combine_op));
};
const auto chunk_size = sequence_size / concurrency_degree_;
#pragma omp parallel
{
#pragma omp single nowait
{
for (int i = 0; i < concurrency_degree_ - 1; ++i) {
const auto delta = chunk_size * i;
const auto chunk_first = std::next(first, delta);
#pragma omp task firstprivate (chunk_first, chunk_size, i)
{
process_chunk(chunk_first, chunk_size, i);
}
}
//Main thread
const auto delta = chunk_size * (concurrency_degree_ - 1);
const auto chunk_first = std::next(first, delta);
const auto chunk_sz = sequence_size - delta;
process_chunk(chunk_first, chunk_sz, concurrency_degree_ - 1);
#pragma omp taskwait
}
}
return seq.reduce(std::next(partial_results.begin()),
partial_results.size() - 1,
partial_results[0], std::forward<Combiner>(combine_op));
}
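// Usage sketch (not part of the original header): sum a vector with the
// OpenMP policy, using 0 as the identity value.
//   parallel_execution_omp ex;
//   std::vector<int> v(1000, 1);
//   auto total = ex.reduce(v.begin(), v.size(), 0,
//                          [](int a, int b) { return a + b; });  // total == 1000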
template<typename ... InputIterators, typename Identity,
typename Transformer, typename Combiner>
auto parallel_execution_omp::map_reduce(
std::tuple<InputIterators...> firsts,
std::size_t sequence_size,
Identity && identity,
Transformer && transform_op, Combiner && combine_op) const
{
constexpr sequential_execution seq;
using result_type = std::decay_t<Identity>;
std::vector<result_type> partial_results(concurrency_degree_);
auto process_chunk = [&](auto f, std::size_t sz, std::size_t i) {
partial_results[i] = seq.map_reduce(
f, sz,
std::forward<Identity>(identity),
std::forward<Transformer>(transform_op),
std::forward<Combiner>(combine_op));
};
const auto chunk_size = sequence_size / concurrency_degree_;
#pragma omp parallel
{
#pragma omp single nowait
{
for (int i = 0; i < concurrency_degree_ - 1; ++i) {
#pragma omp task firstprivate(i)
{
const auto delta = chunk_size * i;
const auto chunk_firsts = iterators_next(firsts, delta);
process_chunk(chunk_firsts, chunk_size, i);
}
}
const auto delta = chunk_size * (concurrency_degree_ - 1);
auto chunk_firsts = iterators_next(firsts, delta);
auto chunk_last = std::next(std::get<0>(firsts), sequence_size);
process_chunk(chunk_firsts,
std::distance(std::get<0>(chunk_firsts), chunk_last),
concurrency_degree_ - 1);
#pragma omp taskwait
}
}
return seq.reduce(partial_results.begin(),
partial_results.size(), std::forward<Identity>(identity),
std::forward<Combiner>(combine_op));
}
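// Usage sketch (not part of the original header): dot product as map_reduce;
// the transformer receives one element per input iterator in the tuple.
//   parallel_execution_omp ex;
//   std::vector<double> a(n), b(n);
//   auto dot = ex.map_reduce(std::make_tuple(a.begin(), b.begin()), n, 0.0,
//     [](double x, double y) { return x * y; },   // transform
//     [](double s, double t) { return s + t; });  // combine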
template<typename ... InputIterators, typename OutputIterator,
typename StencilTransformer, typename Neighbourhood>
void parallel_execution_omp::stencil(
std::tuple<InputIterators...> firsts, OutputIterator first_out,
std::size_t sequence_size,
StencilTransformer && transform_op,
Neighbourhood && neighbour_op) const
{
constexpr sequential_execution seq;
const auto chunk_size = sequence_size / concurrency_degree_;
auto process_chunk = [&](auto f, std::size_t sz, std::size_t delta) {
seq.stencil(f, std::next(first_out, delta), sz,
std::forward<StencilTransformer>(transform_op),
std::forward<Neighbourhood>(neighbour_op));
};
#pragma omp parallel
{
#pragma omp single nowait
{
for (int i = 0; i < concurrency_degree_ - 1; ++i) {
#pragma omp task firstprivate(i)
{
const auto delta = chunk_size * i;
const auto chunk_firsts = iterators_next(firsts, delta);
process_chunk(chunk_firsts, chunk_size, delta);
}
}
const auto delta = chunk_size * (concurrency_degree_ - 1);
const auto chunk_firsts = iterators_next(firsts, delta);
const auto chunk_last = std::next(std::get<0>(firsts), sequence_size);
process_chunk(chunk_firsts,
std::distance(std::get<0>(chunk_firsts), chunk_last), delta);
#pragma omp taskwait
}
}
}
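// Usage sketch (illustrative only): a 1-D two-point smoothing with the OpenMP
// policy. Per the grppi stencil interface, neighbour_op derives a value from
// the centre iterator and transform_op combines it with the centre element;
// the exact callable signatures are assumed here, not taken from this header.
//   parallel_execution_omp ex;
//   std::vector<double> in(n), out(n);
//   ex.stencil(std::make_tuple(in.begin()), out.begin(), n,
//     [](auto it, double left) { return (*it + left) / 2.0; },
//     [&](auto it) { return (it == in.begin()) ? *it : *std::prev(it); });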
template<typename Input, typename Divider, typename Predicate, typename Solver, typename Combiner>
auto parallel_execution_omp::divide_conquer(
Input && input,
Divider && divide_op,
Predicate && predicate_op,
Solver && solve_op,
Combiner && combine_op) const
{
std::atomic<int> num_threads{concurrency_degree_ - 1};
return divide_conquer(std::forward<Input>(input),
std::forward<Divider>(divide_op), std::forward<Predicate>(predicate_op),
std::forward<Solver>(solve_op),
std::forward<Combiner>(combine_op),
num_threads);
}
template<typename Input, typename Divider, typename Solver, typename Combiner>
auto parallel_execution_omp::divide_conquer(
Input && input,
Divider && divide_op,
Solver && solve_op,
Combiner && combine_op) const
{
std::atomic<int> num_threads{concurrency_degree_ - 1};
return divide_conquer(std::forward<Input>(input),
std::forward<Divider>(divide_op), std::forward<Solver>(solve_op),
std::forward<Combiner>(combine_op),
num_threads);
}
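// Usage sketch (not part of the original header): recursive range sum.
// divide_op splits a problem into subproblems, solve_op handles a base case,
// combine_op merges partial results. The range type and the split_in_two /
// sequential_sum helpers are hypothetical placeholders.
//   parallel_execution_omp ex;
//   auto total = ex.divide_conquer(my_range,          // hypothetical range type
//     [](auto r) { return split_in_two(r); },         // hypothetical helper
//     [](auto r) { return sequential_sum(r); },       // hypothetical helper
//     [](long a, long b) { return a + b; });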
template<typename Generator, typename ... Transformers>
void parallel_execution_omp::pipeline(
Generator && generate_op,
Transformers && ... transform_ops) const
{
using namespace std;
using result_type = decay_t<typename result_of<Generator()>::type>;
auto output_queue = make_queue<pair<result_type, long>>();
#pragma omp parallel
{
#pragma omp single nowait
{
#pragma omp task shared(generate_op, output_queue)
{
long order = 0;
for (;;) {
auto item = generate_op();
output_queue.push(make_pair(item, order++));
if (!item) { break; }
}
}
do_pipeline(output_queue,
forward<Transformers>(transform_ops)...);
#pragma omp taskwait
}
}
}
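// Usage sketch (not part of the original header): a three-stage pipeline on
// the OpenMP policy. The generator returns an empty grppi::optional to signal
// end of stream, matching the "if (!item) break" convention above.
//   parallel_execution_omp ex;
//   int n = 0;
//   ex.pipeline(
//     [&]() -> grppi::optional<int> {
//       if (n < 100) { return n++; } else { return {}; }
//     },
//     [](int x) { return x * x; },
//     [](int x) { std::cout << x << "\n"; });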
template<typename Population, typename Selection, typename Evolution,
typename Evaluation, typename Predicate>
void parallel_execution_omp::stream_pool(
[[maybe_unused]] Population & population,
[[maybe_unused]] Selection && selection_op,
[[maybe_unused]] Evolution && evolve_op,
[[maybe_unused]] Evaluation && eval_op,
[[maybe_unused]] Predicate && termination_op) const
{
std::cerr << "stream_pool currently unimplemented on OpenMP\n";
std::abort();
/*
using namespace std;
using selected_type = typename std::result_of<Selection(Population&)>::type;
using individual_type = typename Population::value_type;
using selected_op_type = optional<selected_type>;
using individual_op_type = optional<individual_type>;
if( population.size() == 0 ) return;
#pragma omp parallel
{
#pragma omp single nowait
{
auto selected_queue = make_queue<selected_op_type>();
auto output_queue = make_queue<individual_op_type>();
std::atomic<bool> end{false};
std::atomic<int> done_threads{0};
std::atomic_flag lock = ATOMIC_FLAG_INIT;
for(auto i = 0; i< concurrency_degree_-2; i++){
#pragma omp task shared(done_threads, end, selection_op, evolve_op, eval_op, selected_queue, output_queue)
{
auto selection = selected_queue.pop();
while(selection){
auto evolved = evolve_op(*selection);
auto filtered = eval_op(*selection, evolved);
if(termination_op(filtered)){
end = true;
}
output_queue.push({filtered});
selection = selected_queue.pop();
}
done_threads++;
if(done_threads == concurrency_degree_-2)
output_queue.push(individual_op_type{});
}
}
#pragma omp task shared(population, selected_queue, output_queue, done_threads,end,lock)
{
for(;;) {
if(end) break;
while(lock.test_and_set());
if( population.size() != 0 ){
auto selection = selection_op(population);
lock.clear();
selected_queue.push({selection});
}else{
lock.clear();
}
}
for(int i=0;i<concurrency_degree_-2;i++) selected_queue.push(selected_op_type{});
//output_queue.push(individual_op_type{});
}
#pragma omp task shared(population,lock, output_queue)
{
auto item = output_queue.pop();
while(item) {
while(lock.test_and_set());
population.push_back(*item);
lock.clear();
item = output_queue.pop();
}
}
#pragma omp taskwait
}
}
*/
}
// PRIVATE MEMBERS
template<typename Input, typename Divider, typename Predicate, typename Solver, typename Combiner>
auto parallel_execution_omp::divide_conquer(
Input && input,
Divider && divide_op,
Predicate && predicate_op,
Solver && solve_op,
Combiner && combine_op,
std::atomic<int> & num_threads) const
{
constexpr sequential_execution seq;
if (num_threads.load() <= 0) {
return seq.divide_conquer(std::forward<Input>(input),
std::forward<Divider>(divide_op),
std::forward<Predicate>(predicate_op),
std::forward<Solver>(solve_op),
std::forward<Combiner>(combine_op));
}
if (predicate_op(input)) { return solve_op(std::forward<Input>(input)); }
auto subproblems = divide_op(std::forward<Input>(input));
using subresult_type =
std::decay_t<typename std::result_of<Solver(Input)>::type>;
std::vector<subresult_type> partials(subproblems.size() - 1);
auto process_subproblems = [&, this](auto it, std::size_t div) {
partials[div] = this->divide_conquer(std::forward<Input>(*it),
std::forward<Divider>(divide_op),
std::forward<Predicate>(predicate_op),
std::forward<Solver>(solve_op),
std::forward<Combiner>(combine_op), num_threads);
};
int division = 0;
subresult_type subresult;
#pragma omp parallel
{
#pragma omp single nowait
{
auto i = subproblems.begin() + 1;
while (i != subproblems.end() && num_threads.load() > 0) {
#pragma omp task firstprivate(i, division) \
shared(partials, divide_op, solve_op, combine_op, num_threads)
{
process_subproblems(i, division);
}
num_threads--;
i++;
division++;
}
while (i != subproblems.end()) {
partials[division] = seq.divide_conquer(std::forward<Input>(*i++),
std::forward<Divider>(divide_op),
std::forward<Predicate>(predicate_op),
std::forward<Solver>(solve_op),
std::forward<Combiner>(combine_op));
}
//Main thread works on the first subproblem.
if (num_threads.load() > 0) {
subresult = divide_conquer(std::forward<Input>(*subproblems.begin()),
std::forward<Divider>(divide_op),
std::forward<Predicate>(predicate_op),
std::forward<Solver>(solve_op),
std::forward<Combiner>(combine_op), num_threads);
}
else {
subresult = seq.divide_conquer(
std::forward<Input>(*subproblems.begin()),
std::forward<Divider>(divide_op),
std::forward<Predicate>(predicate_op),
std::forward<Solver>(solve_op),
std::forward<Combiner>(combine_op));
}
#pragma omp taskwait
}
}
return seq.reduce(partials.begin(), partials.size(),
std::forward<subresult_type>(subresult), combine_op);
}
template<typename Input, typename Divider, typename Solver, typename Combiner>
auto parallel_execution_omp::divide_conquer(
Input && input,
Divider && divide_op,
Solver && solve_op,
Combiner && combine_op,
std::atomic<int> & num_threads) const
{
constexpr sequential_execution seq;
if (num_threads.load() <= 0) {
return seq.divide_conquer(std::forward<Input>(input),
std::forward<Divider>(divide_op), std::forward<Solver>(solve_op),
std::forward<Combiner>(combine_op));
}
auto subproblems = divide_op(std::forward<Input>(input));
if (subproblems.size() <= 1) {
return solve_op(std::forward<Input>(input));
}
using subresult_type =
std::decay_t<typename std::result_of<Solver(Input)>::type>;
std::vector<subresult_type> partials(subproblems.size() - 1);
auto process_subproblems = [&, this](auto it, std::size_t div) {
partials[div] = this->divide_conquer(std::forward<Input>(*it),
std::forward<Divider>(divide_op), std::forward<Solver>(solve_op),
std::forward<Combiner>(combine_op), num_threads);
};
int division = 0;
subresult_type subresult;
#pragma omp parallel
{
#pragma omp single nowait
{
auto i = subproblems.begin() + 1;
while (i != subproblems.end() && num_threads.load() > 0) {
#pragma omp task firstprivate(i, division) \
shared(partials, divide_op, solve_op, combine_op, num_threads)
{
process_subproblems(i, division);
}
num_threads--;
i++;
division++;
}
while (i != subproblems.end()) {
partials[division] = seq.divide_conquer(std::forward<Input>(*i++),
std::forward<Divider>(divide_op), std::forward<Solver>(solve_op),
std::forward<Combiner>(combine_op));
}
//Main thread works on the first subproblem.
if (num_threads.load() > 0) {
subresult = divide_conquer(std::forward<Input>(*subproblems.begin()),
std::forward<Divider>(divide_op), std::forward<Solver>(solve_op),
std::forward<Combiner>(combine_op), num_threads);
}
else {
subresult = seq.divide_conquer(
std::forward<Input>(*subproblems.begin()),
std::forward<Divider>(divide_op), std::forward<Solver>(solve_op),
std::forward<Combiner>(combine_op));
}
#pragma omp taskwait
}
}
return seq.reduce(partials.begin(), partials.size(),
std::forward<subresult_type>(subresult), combine_op);
}
template<typename Queue, typename Consumer,
requires_no_pattern <Consumer>>
void parallel_execution_omp::do_pipeline(Queue & input_queue,
Consumer && consume_op) const
{
using namespace std;
using input_type = typename Queue::value_type;
if (!is_ordered()) {
for (;;) {
auto item = input_queue.pop();
if (!item.first) { break; }
consume_op(*item.first);
}
return;
}
vector<input_type> elements;
long current = 0;
auto item = input_queue.pop();
while (item.first) {
if (current == item.second) {
consume_op(*item.first);
current++;
}
else {
elements.push_back(item);
}
auto it = find_if(elements.begin(), elements.end(),
[&](auto x) { return x.second == current; });
if (it != elements.end()) {
consume_op(*it->first);
elements.erase(it);
current++;
}
item = input_queue.pop();
}
while (elements.size() > 0) {
auto it = find_if(elements.begin(), elements.end(),
[&](auto x) { return x.second == current; });
if (it != elements.end()) {
consume_op(*it->first);
elements.erase(it);
current++;
}
}
}
template<typename Inqueue, typename Transformer, typename output_type,
requires_no_pattern <Transformer>>
void parallel_execution_omp::do_pipeline(Inqueue & input_queue,
Transformer && transform_op,
mpmc_queue <output_type> & output_queue) const
{
using namespace std;
using output_item_value_type = typename output_type::first_type::value_type;
for (;;) {
auto item{input_queue.pop()};
if (!item.first) { break; }
auto out = output_item_value_type{transform_op(*item.first)};
output_queue.push(make_pair(out, item.second));
}
}
template<typename Queue, typename Execution, typename Transformer,
template<typename, typename> class Context,
typename ... OtherTransformers,
requires_context <Context<Execution, Transformer>>>
void parallel_execution_omp::do_pipeline(Queue & input_queue,
Context<Execution, Transformer> && context_op,
OtherTransformers && ... other_ops) const
{
using namespace std;
using input_item_type = typename Queue::value_type;
using input_item_value_type = typename input_item_type::first_type::value_type;
using output_type = typename stage_return_type<input_item_value_type, Transformer>::type;
using output_optional_type = grppi::optional<output_type>;
using output_item_type = pair<output_optional_type, long>;
decltype(auto) output_queue =
get_output_queue<output_item_type>(other_ops...);
#pragma omp task shared(input_queue, context_op, output_queue)
{
context_op.execution_policy().pipeline(input_queue,
context_op.transformer(), output_queue);
output_queue.push(make_pair(output_optional_type{}, -1));
}
do_pipeline(output_queue,
forward<OtherTransformers>(other_ops)...);
#pragma omp taskwait
}
template<typename Queue, typename Transformer, typename ... OtherTransformers,
requires_no_pattern <Transformer>>
void parallel_execution_omp::do_pipeline(
Queue & input_queue,
Transformer && transform_op,
OtherTransformers && ... other_ops) const
{
using namespace std;
using input_type = typename Queue::value_type;
using input_value_type = typename input_type::first_type::value_type;
using result_type = typename result_of<Transformer(input_value_type)>::type;
using output_value_type = grppi::optional<result_type>;
using output_type = pair<output_value_type, long>;
decltype(auto) output_queue =
get_output_queue<output_type>(other_ops...);
#pragma omp task shared(transform_op, input_queue, output_queue)
{
for (;;) {
auto item = input_queue.pop();
if (!item.first) { break; }
auto out = output_value_type{transform_op(*item.first)};
output_queue.push(make_pair(out, item.second));
}
output_queue.push(make_pair(output_value_type{}, -1));
}
do_pipeline(output_queue,
forward<OtherTransformers>(other_ops)...);
}
template<typename Queue, typename FarmTransformer,
template<typename> class Farm,
requires_farm <Farm<FarmTransformer>>>
void parallel_execution_omp::do_pipeline(
Queue & input_queue,
Farm<FarmTransformer> && farm_obj) const
{
using namespace std;
for (int i = 0; i < farm_obj.cardinality(); ++i) {
#pragma omp task shared(farm_obj, input_queue)
{
auto item = input_queue.pop();
while (item.first) {
farm_obj(*item.first);
item = input_queue.pop();
}
input_queue.push(item);
}
}
#pragma omp taskwait
}
template<typename Queue, typename FarmTransformer,
template<typename> class Farm,
typename ... OtherTransformers,
requires_farm <Farm<FarmTransformer>>>
void parallel_execution_omp::do_pipeline(
Queue & input_queue,
Farm<FarmTransformer> && farm_obj,
OtherTransformers && ... other_transform_ops) const
{
using namespace std;
using input_type = typename Queue::value_type;
using input_value_type = typename input_type::first_type::value_type;
using result_type = typename stage_return_type<input_value_type, FarmTransformer>::type;
using output_optional_type = grppi::optional<result_type>;
using output_type = pair<output_optional_type, long>;
decltype(auto) output_queue =
get_output_queue<output_type>(other_transform_ops...);
// auto output_queue = make_queue<output_type>();
atomic<int> done_threads{0};
int ntask = farm_obj.cardinality();
for (int i = 0; i < farm_obj.cardinality(); ++i) {
#pragma omp task shared(done_threads, output_queue, farm_obj, input_queue, ntask)
{
do_pipeline(input_queue, farm_obj.transformer(), output_queue);
done_threads++;
if (done_threads == ntask) {
output_queue.push(make_pair(output_optional_type{}, -1));
}
else {
input_queue.push(input_type{});
}
}
}
do_pipeline(output_queue,
forward<OtherTransformers>(other_transform_ops)...);
#pragma omp taskwait
}
template<typename Queue, typename Predicate,
template<typename> class Filter,
requires_filter <Filter<Predicate>>>
void parallel_execution_omp::do_pipeline(
Queue &,
Filter<Predicate> &&) const
{
}
template<typename Queue, typename Predicate,
template<typename> class Filter,
typename ... OtherTransformers,
requires_filter <Filter<Predicate>>>
void parallel_execution_omp::do_pipeline(
Queue & input_queue,
Filter<Predicate> && filter_obj,
OtherTransformers && ... other_transform_ops) const
{
using namespace std;
using input_type = typename Queue::value_type;
using input_value_type = typename input_type::first_type;
auto filter_queue = make_queue<input_type>();
if (is_ordered()) {
auto filter_task = [&]() {
{
auto item{input_queue.pop()};
while (item.first) {
if (filter_obj.keep()) {
if (filter_obj(*item.first)) {
filter_queue.push(item);
}
else {
filter_queue.push(make_pair(input_value_type{}, item.second));
}
}
else {
if (!filter_obj(*item.first)) {
filter_queue.push(item);
}
else {
filter_queue.push(make_pair(input_value_type{}, item.second));
}
}
item = input_queue.pop();
}
filter_queue.push(make_pair(input_value_type{}, -1));
}
};
decltype(auto) output_queue =
get_output_queue<input_type>(other_transform_ops...);
auto reorder_task = [&]() {
vector<input_type> elements;
int current = 0;
long order = 0;
auto item = filter_queue.pop();
for (;;) {
if (!item.first && item.second == -1) { break; }
if (item.second == current) {
if (item.first) {
output_queue.push(make_pair(item.first, order++));
}
current++;
}
else {
elements.push_back(item);
}
auto it = find_if(elements.begin(), elements.end(),
[&](auto x) { return x.second == current; });
if (it != elements.end()) {
if (it->first) {
output_queue.push(make_pair(it->first, order));
order++;
}
elements.erase(it);
current++;
}
item = filter_queue.pop();
}
while (elements.size() > 0) {
auto it = find_if(elements.begin(), elements.end(),
[&](auto x) { return x.second == current; });
if (it != elements.end()) {
if (it->first) {
output_queue.push(make_pair(it->first, order));
order++;
}
elements.erase(it);
current++;
}
item = filter_queue.pop();
}
output_queue.push(item);
};
#pragma omp task shared(filter_queue, filter_obj, input_queue)
{
filter_task();
}
#pragma omp task shared (output_queue, filter_queue)
{
reorder_task();
}
do_pipeline(output_queue,
forward<OtherTransformers>(other_transform_ops)...);
#pragma omp taskwait
}
else {
auto filter_task = [&]() {
auto item = input_queue.pop();
while (item.first) {
if (filter_obj(*item.first)) {
filter_queue.push(item);
}
item = input_queue.pop();
}
filter_queue.push(make_pair(input_value_type{}, -1));
};
#pragma omp task shared(filter_queue, filter_obj, input_queue)
{
filter_task();
}
do_pipeline(filter_queue,
std::forward<OtherTransformers>(other_transform_ops)...);
#pragma omp taskwait
}
}
template<typename Queue, typename Combiner, typename Identity,
template<typename C, typename I> class Reduce,
typename ... OtherTransformers,
requires_reduce <Reduce<Combiner, Identity>>>
void parallel_execution_omp::do_pipeline(
Queue && input_queue,
Reduce<Combiner, Identity> && reduce_obj,
OtherTransformers && ... other_transform_ops) const
{
using namespace std;
using output_item_value_type = grppi::optional<decay_t<Identity>>;
using output_item_type = pair<output_item_value_type, long>;
decltype(auto) output_queue =
get_output_queue<output_item_type>(other_transform_ops...);
auto reduce_task = [&]() {
auto item{input_queue.pop()};
int order = 0;
while (item.first) {
reduce_obj.add_item(std::forward<Identity>(*item.first));
item = input_queue.pop();
if (reduce_obj.reduction_needed()) {
constexpr sequential_execution seq;
auto red = reduce_obj.reduce_window(seq);
output_queue.push(make_pair(red, order++));
}
}
output_queue.push(make_pair(output_item_value_type{}, -1));
};
#pragma omp task shared(reduce_obj, input_queue, output_queue)
{
reduce_task();
}
do_pipeline(output_queue,
std::forward<OtherTransformers>(other_transform_ops)...);
#pragma omp taskwait
}
template<typename Queue, typename Transformer, typename Predicate,
template<typename T, typename P> class Iteration,
typename ... OtherTransformers,
requires_iteration <Iteration<Transformer, Predicate>>,
requires_no_pattern <Transformer>>
void parallel_execution_omp::do_pipeline(
Queue & input_queue,
Iteration<Transformer, Predicate> && iteration_obj,
OtherTransformers && ... other_transform_ops) const
{
using namespace std;
using input_item_type = typename decay_t<Queue>::value_type;
decltype(auto) output_queue =
get_output_queue<input_item_type>(other_transform_ops...);
auto iteration_task = [&]() {
for (;;) {
auto item = input_queue.pop();
if (!item.first) { break; }
auto value = iteration_obj.transform(*item.first);
auto new_item = input_item_type{value, item.second};
if (iteration_obj.predicate(value)) {
output_queue.push(new_item);
}
else {
input_queue.push(new_item);
}
}
while (!input_queue.empty()) {
auto item = input_queue.pop();
auto value = iteration_obj.transform(*item.first);
auto new_item = input_item_type{value, item.second};
if (iteration_obj.predicate(value)) {
output_queue.push(new_item);
}
else {
input_queue.push(new_item);
}
}
output_queue.push(input_item_type{{}, -1});
};
#pragma omp task shared(iteration_obj, input_queue, output_queue)
{
iteration_task();
}
do_pipeline(output_queue,
std::forward<OtherTransformers>(other_transform_ops)...);
#pragma omp taskwait
}
template<typename Queue, typename Transformer, typename Predicate,
template<typename T, typename P> class Iteration,
typename ... OtherTransformers,
requires_iteration <Iteration<Transformer, Predicate>>,
requires_pipeline <Transformer>>
void parallel_execution_omp::do_pipeline(
Queue &,
Iteration<Transformer, Predicate> &&,
OtherTransformers && ...) const
{
static_assert(!is_pipeline<Transformer>, "Not implemented");
}
template<typename Queue, typename ... Transformers,
template<typename...> class Pipeline,
typename ... OtherTransformers,
requires_pipeline <Pipeline<Transformers...>>>
void parallel_execution_omp::do_pipeline(
Queue & input_queue,
Pipeline<Transformers...> && pipeline_obj,
OtherTransformers && ... other_transform_ops) const
{
do_pipeline_nested(
input_queue,
std::tuple_cat(pipeline_obj.transformers(),
std::forward_as_tuple(other_transform_ops...)),
std::make_index_sequence<
sizeof...(Transformers) + sizeof...(OtherTransformers)>());
}
template<typename Queue, typename ... Transformers,
std::size_t ... I>
void parallel_execution_omp::do_pipeline_nested(
Queue & input_queue,
std::tuple<Transformers...> && transform_ops,
std::index_sequence<I...>) const
{
do_pipeline(input_queue,
std::forward<Transformers>(std::get<I>(transform_ops))...);
}
template<typename T, typename... Others>
void parallel_execution_omp::do_pipeline(mpmc_queue <T> &, mpmc_queue <T> &,
Others && ...) const {}
} // end namespace grppi
#else // GRPPI_OMP undefined
namespace grppi {
/// Parallel execution policy.
/// Empty type if GRPPI_OMP disabled.
struct parallel_execution_omp {};
/**
\brief Metafunction that determines if type E is parallel_execution_omp
This metafunction evaluates to false if GRPPI_OMP is disabled.
\tparam E Execution policy type.
*/
template <typename E>
constexpr bool is_parallel_execution_omp() {
return false;
}
}
#endif // GRPPI_OMP
#endif
|
3d25pt_var.c | /*
* Order-1, 3D 25-point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
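/* Usage sketch:
 *   struct timeval t0, t1, dt;
 *   gettimeofday(&t0, 0);
 *   ... timed work ...
 *   gettimeofday(&t1, 0);
 *   timeval_subtract(&dt, &t1, &t0);  // dt = t1 - t0
 */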
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 8, Ny = 8, Nz = 8, Nt = 0; /* fallback values (assumed) so the sizes are never read uninitialized when arguments are missing */
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 4;
tile_size[1] = 4;
tile_size[2] = 8;
tile_size[3] = 64;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
1.race11.c | // RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s
#include <omp.h>
#define N 20
int main() {
int A[N][N];
#pragma omp parallel
{
#pragma omp for
for (int i = 1; i < N; i++)
for (int j = 1; j < N; j++)
A[i][j] = A[i][j - 1];
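// The next loop has a true race: worksharing splits the i-dimension across
// threads, and iteration i reads A[i-1][j], which another thread may still
// be writing. The loops above and below only carry dependences along j,
// which stays within a single thread's rows.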
#pragma omp for
for (int i = 1; i < N; i++)
for (int j = 1; j < N; j++)
A[i][j] = A[i - 1][j];
#pragma omp for
for (int i = 1; i < N; i++)
for (int j = 1; j < N; j++)
A[i][j] = A[i][j - 1];
#pragma omp for
for (int i = 1; i < N; i++)
for (int j = 1; j < N; j++)
A[i][j] = A[i][j];
}
}
// Printing in reverse order. Need to fix
// CHECK: Region is Data Race Free
// CHECK: Region is Data Race Free
// CHECK: Data Race detected
// CHECK: Region is Data Race Free
// END
|
contact_residualbased_elimination_builder_and_solver_with_constraints.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Vicente Mataix Ferrandiz
//
//
#if !defined(KRATOS_CONTACT_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS )
#define KRATOS_CONTACT_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS
/* System includes */
#include <unordered_set>
#include <unordered_map>
/* External includes */
/* Project includes */
#include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_with_constraints.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class ContactResidualBasedEliminationBuilderAndSolverWithConstraints
* @ingroup ContactStructuralMechanicsApplication
* @brief Current class provides an implementation for contact builder and solving operations. (elimination)
* @details The RHS is constituted by the unbalanced loads (residual). Degrees of freedom are reordered by putting the restrained ones at the end of the system, in reverse order with respect to the DofSet; inactive DoFs are not considered. Imposition of the Dirichlet conditions is naturally dealt with, as the residual already contains this information. Calculation of the reactions involves a cost very similar to the calculation of the total residual.
* @author Vicente Mataix Ferrandiz
* @tparam TSparseSpace The sparse matrix system considered
* @tparam TDenseSpace The dense matrix system
* @tparam TLinearSolver The type of linear solver considered
*/
template<class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ContactResidualBasedEliminationBuilderAndSolverWithConstraints
: public ResidualBasedEliminationBuilderAndSolverWithConstraints< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
///@name Type Definitions
///@{
/// Pointer definition of ContactResidualBasedEliminationBuilderAndSolverWithConstraints
KRATOS_CLASS_POINTER_DEFINITION(ContactResidualBasedEliminationBuilderAndSolverWithConstraints);
/// Definitions dependent of the base class
typedef ResidualBasedEliminationBuilderAndSolverWithConstraints< TSparseSpace, TDenseSpace, TLinearSolver > BaseType;
/// Base types definitions
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodeType NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
/// General containers type definitions
typedef ModelPart::MasterSlaveConstraintContainerType ConstraintContainerType;
/// Additional definitions
typedef typename BaseType::ElementsContainerType ElementsContainerType;
typedef typename BaseType::EquationIdVectorType EquationIdVectorType;
typedef typename BaseType::DofsVectorType DofsVectorType;
/// DoF types definition
typedef typename BaseType::DofType DofType;
typedef typename BaseType::DofPointerType DofPointerType;
/// The DoF pointer vector type definition
typedef std::vector<typename DofType::Pointer> DofPointerVectorType;
/// The size type
typedef std::size_t SizeType;
/// The index type
typedef std::size_t IndexType;
/// Index set definition
typedef std::unordered_set<IndexType> IndexSetType;
///@}
///@name Enum's
///@{
///@}
///@name Life Cycle
///@{
/** Constructor.
*/
ContactResidualBasedEliminationBuilderAndSolverWithConstraints(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BaseType(pNewLinearSystemSolver)
{
}
/** Destructor.
*/
~ContactResidualBasedEliminationBuilderAndSolverWithConstraints() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
/**
* @brief It organises the dofset in order to speed up the building phase
* @param rModelPart The model part to compute
*/
void SetUpSystem(
ModelPart& rModelPart
) override
{
if(rModelPart.MasterSlaveConstraints().size() > 0)
SetUpSystemWithConstraints(rModelPart);
else
BaseSetUpSystem(rModelPart);
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" each element
* and condition for its Dofs.
* @details The list of dofs is stored inside the BuilderAndSolver, as it is closely connected to the
* way the matrix and RHS are built.
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
) override
{
if(rModelPart.MasterSlaveConstraints().size() > 0)
SetUpDofSetWithConstraints(pScheme, rModelPart);
else
BaseType::SetUpDofSet(pScheme, rModelPart);
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" each element and condition for its Dofs.
* @details Equivalent to the ResidualBasedEliminationBuilderAndSolver but with constraints. The list of dofs is stored inside the BuilderAndSolver, as it is closely connected to the way the matrix and RHS are built.
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSetWithConstraints(
typename TSchemeType::Pointer pScheme,
ModelPart& rModelPart
)
{
KRATOS_TRY;
// We are going to enforce the existence of constraints for LM for each displacement dof
if (rModelPart.NodesBegin()->SolutionStepsDataHas(VECTOR_LAGRANGE_MULTIPLIER)) {
// Reorder constraints
IndexType constraint_id = 1;
for (auto& constrain : rModelPart.MasterSlaveConstraints()) {
constrain.SetId(constraint_id);
++constraint_id;
}
// Auxiliar dofs lists
DofsVectorType dof_list, second_dof_list; // NOTE: The second dof list is only used on constraints to include master/slave relations
// Contributions to the system
LocalSystemMatrixType transformation_matrix = LocalSystemMatrixType(0, 0);
LocalSystemVectorType constant_vector = LocalSystemVectorType(0);
// Reference constraint
const auto& r_clone_constraint = KratosComponents<MasterSlaveConstraint>::Get("LinearMasterSlaveConstraint");
#pragma omp parallel firstprivate(transformation_matrix, constant_vector, dof_list, second_dof_list)
{
// Current process info
ProcessInfo& r_current_process_info = rModelPart.GetProcessInfo();
// A buffer to store auxiliar constraints
ConstraintContainerType constraints_buffer;
// Gets the array of constraints from the modeler
auto& r_constraints_array = rModelPart.MasterSlaveConstraints();
const int number_of_constraints = static_cast<int>(r_constraints_array.size());
#pragma omp for schedule(guided, 512)
for (int i = 0; i < number_of_constraints; ++i) {
auto it_const = r_constraints_array.begin() + i;
// Gets list of Dof involved on every element
it_const->GetDofList(dof_list, second_dof_list, r_current_process_info);
it_const->CalculateLocalSystem(transformation_matrix, constant_vector, r_current_process_info);
DofPointerVectorType slave_dofs, master_dofs;
bool create_lm_constraint = false;
// We check if we have SLAVE nodes in the master dofs
bool slave_nodes_master_dof = false;
// Master DoFs
for (auto& p_dof : second_dof_list) {
if (IsDisplacementDof(*p_dof)) {
const IndexType node_id = p_dof->Id();
auto pnode = rModelPart.pGetNode(node_id);
if (pnode->Is(SLAVE)) { // The nodes computing contact are the slave nodes
slave_nodes_master_dof = true;
break;
}
}
}
// Slave DoFs
for (auto& p_dof : dof_list) {
if (IsDisplacementDof(*p_dof)) {
const IndexType node_id = p_dof->Id();
const auto& r_variable = p_dof->GetVariable();
auto pnode = rModelPart.pGetNode(node_id);
if (pnode->IsNot(INTERFACE) || slave_nodes_master_dof) { // Nodes from the contact interface cannot be slave DoFs
if (r_variable == DISPLACEMENT_X) {
slave_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_X));
} else if (r_variable == DISPLACEMENT_Y) {
slave_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_Y));
} else if (r_variable == DISPLACEMENT_Z) {
slave_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_Z));
}
} else { // We remove it
it_const->Set(TO_ERASE);
}
}
}
// Master DoFs
if (slave_nodes_master_dof) { // The nodes computing contact are the slave nodes
for (auto& p_dof : second_dof_list) {
if (IsDisplacementDof(*p_dof)) {
const IndexType node_id = p_dof->Id();
const auto& r_variable = p_dof->GetVariable();
auto pnode = rModelPart.pGetNode(node_id);
if (r_variable == DISPLACEMENT_X) {
master_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_X));
} else if (r_variable == DISPLACEMENT_Y) {
master_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_Y));
} else if (r_variable == DISPLACEMENT_Z) {
master_dofs.push_back(pnode->pGetDof(VECTOR_LAGRANGE_MULTIPLIER_Z));
}
}
}
}
// We check if we create constraints
if ((slave_dofs.size() == dof_list.size()) &&
(master_dofs.size() == second_dof_list.size())) {
create_lm_constraint = true;
}
// We create the new constraint
if (create_lm_constraint) {
auto p_constraint = r_clone_constraint.Create(constraint_id + i + 1, master_dofs, slave_dofs, transformation_matrix, constant_vector);
(constraints_buffer).insert((constraints_buffer).begin(), p_constraint);
}
}
// We transfer
#pragma omp critical
{
rModelPart.AddMasterSlaveConstraints(constraints_buffer.begin(),constraints_buffer.end());
}
}
}
// We remove the marked constraints
rModelPart.RemoveMasterSlaveConstraintsFromAllLevels(TO_ERASE);
KRATOS_INFO_IF("ContactResidualBasedEliminationBuilderAndSolverWithConstraints", (this->GetEchoLevel() > 0)) <<
"Model part after creating new constraints" << rModelPart << std::endl;
// Calling base SetUpDofSetWithConstraints
BaseType::SetUpDofSetWithConstraints(pScheme, rModelPart);
KRATOS_CATCH("");
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
* @brief This method computes the equivalent counterpart of SetUpSystem when using constraints
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystemWithConstraints(ModelPart& rModelPart)
{
KRATOS_TRY
// First we set up the system of equations without constraints
BaseSetUpSystem(rModelPart);
// Add the computation of the global ids of the solvable dofs
IndexType counter = 0;
for (auto& dof : BaseType::mDofSet) {
if (dof.EquationId() < BaseType::mEquationSystemSize) {
auto it = BaseType::mDoFSlaveSet.find(dof);
if (it == BaseType::mDoFSlaveSet.end()) {
++counter;
}
}
}
// The total system of equations to be solved
BaseType::mDoFToSolveSystemSize = counter;
KRATOS_CATCH("ContactResidualBasedEliminationBuilderAndSolverWithConstraints::FormulateGlobalMasterSlaveRelations failed ..");
}
/**
* @brief It organises the dofset in order to speed up the building phase (base one)
* @param rModelPart The model part to compute
*/
void BaseSetUpSystem(ModelPart& rModelPart)
{
/**
* Same as the non-contact version, except that if we fix the displacement in a slave node we should also fix the corresponding LM for consistency
*/
// We create a set of dofs of the displacement slave dofs with LM associated
std::unordered_map<IndexType, IndexSetType> set_nodes_with_lm_associated;
if (rModelPart.HasSubModelPart("Contact"))
set_nodes_with_lm_associated.reserve(rModelPart.GetSubModelPart("Contact").NumberOfNodes());
// Allocating auxiliar parameters
IndexType node_id;
// We start the dof loop
for (auto& i_dof : BaseType::mDofSet) {
node_id = i_dof.Id();
if (IsLMDof(i_dof))
set_nodes_with_lm_associated.insert({node_id, IndexSetType({})});
}
// Auxiliar keys
const IndexType key_lm_x = VECTOR_LAGRANGE_MULTIPLIER_X.Key();
const IndexType key_lm_y = VECTOR_LAGRANGE_MULTIPLIER_Y.Key();
const IndexType key_lm_z = VECTOR_LAGRANGE_MULTIPLIER_Z.Key();
// We see which LM block
for (auto& i_dof : BaseType::mDofSet) {
node_id = i_dof.Id();
auto it = set_nodes_with_lm_associated.find(node_id);
if ( it != set_nodes_with_lm_associated.end()) {
if (i_dof.IsFixed()) {
const auto& r_variable = i_dof.GetVariable();
auto& aux_set = (it->second);
if (r_variable == DISPLACEMENT_X) {
aux_set.insert(key_lm_x);
} else if (r_variable == DISPLACEMENT_Y) {
aux_set.insert(key_lm_y);
} else if (r_variable == DISPLACEMENT_Z) {
aux_set.insert(key_lm_z);
}
}
}
}
// We do now the loop over the dofs
for (auto& i_dof : BaseType::mDofSet) {
if (i_dof.IsFree()) {
node_id = i_dof.Id();
auto it = set_nodes_with_lm_associated.find(node_id);
if (it != set_nodes_with_lm_associated.end()) {
auto& aux_set = it->second;
if (aux_set.find((i_dof.GetVariable()).Key()) != aux_set.end()) {
i_dof.FixDof();
}
}
}
}
BaseType::SetUpSystem(rModelPart);
}
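// Consistency rule enforced above: if a displacement component is fixed on a
// node that also carries the matching VECTOR_LAGRANGE_MULTIPLIER component,
// that LM dof is fixed as well, so the eliminated system never couples a
// free multiplier to a prescribed displacement.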
/**
* @brief Checks if the degree of freedom belongs to a displacement DoF
* @param rDoF The degree of freedom
* @return True if the DoF corresponds with a displacement dof
*/
static inline bool IsDisplacementDof(const DofType& rDoF)
{
const auto& r_variable = rDoF.GetVariable();
if (r_variable == DISPLACEMENT_X ||
r_variable == DISPLACEMENT_Y ||
r_variable == DISPLACEMENT_Z) {
return true;
}
return false;
}
/**
* @brief Checks if the degree of freedom belongs to a LM DoF
* @param rDoF The degree of freedom
* @return True if the DoF corresponds with a LM dof
*/
static inline bool IsLMDof(const DofType& rDoF)
{
const auto& r_variable = rDoF.GetVariable();
if (r_variable == VECTOR_LAGRANGE_MULTIPLIER_X ||
r_variable == VECTOR_LAGRANGE_MULTIPLIER_Y ||
r_variable == VECTOR_LAGRANGE_MULTIPLIER_Z) {
return true;
}
return false;
}
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class ContactResidualBasedEliminationBuilderAndSolverWithConstraints */
///@}
///@name Type Definitions */
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_CONTACT_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_WITH_CONSTRAINTS defined */
|
fws_parfor.c | /*
Standard implementation of the Floyd-Warshall Algorithm
using OpenMP parallel for.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "util.h"
#include <omp.h>
inline int min(int a, int b);
int main(int argc, char **argv)
{
int **A;
int i,j,k;
struct timeval t1, t2;
double time;
int N=1024;
if (argc != 2) {
fprintf(stdout,"Usage: %s N\n", argv[0]);
exit(0);
}
N=atoi(argv[1]);
A = (int **) malloc(N*sizeof(int *));
for(i=0; i<N; i++) A[i] = (int *) malloc(N*sizeof(int));
graph_init_random(A,-1,N,128*N);
gettimeofday(&t1,0);
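/* The k-loop carries a dependence (iteration k reads rows and columns
 * produced by iteration k-1), so only the i/j loops below are shared
 * among threads. */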
for(k=0;k<N;k++)
#pragma omp parallel for private(i, j) shared(A, k, N)
for(i=0; i<N; i++)
for(j=0; j<N; j++)
A[i][j]=min(A[i][j], A[i][k] + A[k][j]);
gettimeofday(&t2,0);
time=(double)((t2.tv_sec-t1.tv_sec)*1000000+t2.tv_usec-t1.tv_usec)/1000000;
printf("FW,%d,%.4f\n", N, time);
/*
for(i=0; i<N; i++)
for(j=0; j<N; j++) fprintf(stdout,"%d\n", A[i][j]);
*/
return 0;
}
inline int min(int a, int b)
{
if(a<=b)return a;
else return b;
}
|
sol1.c | /**
* \file
* \brief [Problem 26](https://projecteuler.net/problem=26) solution
* \author [Krishna Vedala](https://github.com/kvedala)
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#define MAX_DENO 2000 /**< limit of unit fractions */
#define MAX_LEN \
(MAX_DENO + 10) /**< length of resulting recurring fraction number */
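/* Worked example: 1/7 = 0.(142857). The successive long-division remainders
 * are 1, 3, 2, 6, 4, 5, after which 1 repeats, so the recurring cycle has
 * length 6. The loop below detects exactly this first repeated remainder. */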
/** qsort/bsearch-style comparison function (retained, though the cycle search below uses a linear scan because the remainder list is unsorted) */
int compare(const void *a, const void *b)
{
return (*(unsigned short *)a - *(unsigned short *)b);
}
/** Main function */
int main(int argc, char *argv[])
{
unsigned short max_digits = 0, max_idx_number = 0;
clock_t start_time = clock();
short deno;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (deno = 2; deno < MAX_DENO; deno++)
{
unsigned short remainders[MAX_LEN];
unsigned short rem = 1, *rem_ptr = remainders;
memset(remainders, (unsigned short)-1,
MAX_LEN * sizeof(unsigned short));
// remainders[0] = 1;
// printf("1/%-4u\t ", deno);
unsigned short index = 0, num_digits;
while (rem != 0)
{
rem = (rem * 10) % deno;
if (rem == 0)
{
index = 0;
break;
}
/* linear scan: remainders are stored in order of first appearance and are
 * unsorted, so bsearch (which requires a sorted array) cannot be used */
rem_ptr = NULL;
for (unsigned short s = 0; s < index; s++)
if (remainders[s] == rem) { rem_ptr = &remainders[s]; break; }
if (rem_ptr != NULL)
break;
remainders[index] = rem;
rem_ptr = remainders;
index++;
}
num_digits = index - (rem_ptr - remainders);
// printf("\n\t(%14p, %14p, %4u, %4u)\n", rem_ptr, remainders, index,
// num_digits);
#ifdef _OPENMP
#pragma omp critical
{
#endif
if (num_digits > max_digits)
{
max_digits = num_digits;
max_idx_number = deno;
// printf("\t (%u, %u)\n ", max_digits, max_idx_number);
}
#ifdef _OPENMP
}
#endif
}
clock_t end_time = clock();
printf("Time taken: %.4g ms\n",
1e3 * (double)(end_time - start_time) / CLOCKS_PER_SEC);
printf("Maximum digits: %hu\t Denominator: %hu\n", max_digits,
max_idx_number);
return 0;
}
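/**
 * Illustrative sketch, not called by main(): the same remainder-cycle idea
 * applied to a single denominator. For 1/7 the remainders run 1,3,2,6,4,5
 * before repeating, so the recurring cycle (142857) has length 6. The name
 * `cycle_length` is ours, not part of the original solution.
 */
unsigned short cycle_length(unsigned short deno)
{
    unsigned short seen[MAX_LEN];
    unsigned short rem = 1, idx = 0;
    while (rem != 0)
    {
        for (unsigned short k = 0; k < idx; k++) /* linear cycle detection */
            if (seen[k] == rem)
                return idx - k; /* distance back to the first repeat */
        seen[idx++] = rem;
        rem = (rem * 10) % deno;
    }
    return 0; /* the decimal terminates, no recurring cycle */
}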
|
distributions.h | /* Copyright 2015 The math21 Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#pragma once
#include "inner.h"
#include "ran.h"
namespace math21 {
void math21_random_draw(NumN &x, think::Random &ran);
void math21_random_draw(NumZ &x, think::Random &ran);
void math21_random_draw(NumR &x, think::Random &ran);
template<typename T, template<typename> class Container>
void math21_random_container_draw(Container<T> &A, think::Random &ran) {
MATH21_ASSERT(!A.isEmpty());
NumN i;
NumN n = A.size();
//#pragma omp parallel for
for (i = 1; i <= n; ++i) {
math21_random_draw(A.at(i), ran);
}
}
template<typename T>
void math21_random_draw(Tensor <T> &m, think::Random &ran) {
MATH21_ASSERT(!m.isEmpty());
return math21_random_container_draw(m, ran);
}
template<typename T>
void math21_op_random_draw(Tensor <T> &m, think::Random &ran) {
MATH21_ASSERT(!m.isEmpty());
if (m.is_cpu()) {
math21_random_container_draw(m, ran);
} else {
Tensor<T> m_c;
m_c.setSize(m.shape());
math21_random_container_draw(m_c, ran);
m = m_c;
}
}
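    // Usage sketch (assumes some concrete think::Random implementation named
    // `ran`; that name is illustrative, not part of this header):
    //   Tensor<NumR> m;
    //   m.setSize(...);                  // shape it first: draw asserts non-empty
    //   math21_op_random_draw(m, ran);   // draws on CPU, copies back to device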
NumR math21_pr_poisson_probability(NumN n, NumN lambda);
NumR math21_pr_binomial(NumN n, NumR p, NumN k);
NumB math21_pr_mvn_logpdf2(const MatR &x, const VecR &mean, const MatR &covariance, VecR &value);
void math21_pr_mvn_dYdX_diag_logpdf(const VecR &x, const VecR &mean, const MatR &covariance, MatR &dx);
void math21_pr_mvn_dYdmu_logpdf(const VecR &x, const VecR &mean, const MatR &covariance, MatR &dmu);
void math21_pr_mvn_dYdSig_logpdf(const VecR &x, const VecR &mean, const MatR &covariance, MatR &dSig);
} |
GiRaFFE_boundary_conditions.h | // Currently, we're using basic Cartesian boundary conditions, pending fixes by Zach.
// Part P8a: Declare boundary condition FACE_UPDATE macro,
//           which updates a single face of the 3D grid cube
//           using polynomial extrapolation. (The active stencil below is
//           linear, using the two nearest interior points; a third-point
//           term is commented out.)
// Basic extrapolation boundary conditions
#define FACE_UPDATE(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \
gfs[IDX4(which_gf,i0,i1,i2)] = \
+2.0*gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
-1.0*gfs[IDX4(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)]; \
}
//                 +1.0*gfs[IDX4(which_gf,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)];
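// Example: on an x-min face (FACEX0 = MINFACE = +1) the active stencil reads
//   gfs[i0] = 2.0*gfs[i0+1] - 1.0*gfs[i0+2],
// i.e. linear extrapolation through the two nearest interior points. A true
// quadratic extrapolation through three interior points would instead use
// coefficients +3, -3, +1.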
// Basic Copy boundary conditions
#define FACE_UPDATE_COPY(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \
gfs[IDX4(which_gf,i0,i1,i2)] = gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)]; \
}
// Part P8b: Boundary condition driver routine: Apply BCs to all six
// boundary faces of the cube, filling in the innermost
// ghost zone first, and moving outward.
const int MAXFACE = -1;
const int NUL = +0;
const int MINFACE = +1;
// This macro acts differently in that it was written to act on an entire 3-vector
// of gfs, instead of 1: which_gf_0 corresponds to the zeroth component of that
// vector, and the if statements only evaluate true if the velocity is directed
// inwards on the face in consideration (inflow is then zeroed). Those checks are
// commented out below, so at present the macro simply extrapolates the single
// gridfunction which_gf, exactly like FACE_UPDATE.
#define FACE_UPDATE_OUTFLOW(which_gf, i0min,i0max, i1min,i1max, i2min,i2max, FACEX0,FACEX1,FACEX2) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) { \
aux_gfs[IDX4(which_gf,i0,i1,i2)] = \
+2.0*aux_gfs[IDX4(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
-1.0*aux_gfs[IDX4(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)]; \
}
/* aux_gfs[IDX4(which_gf_0+1,i0,i1,i2)] = \
+3.0*aux_gfs[IDX4(which_gf_0+1,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
-3.0*aux_gfs[IDX4(which_gf_0+1,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
+1.0*aux_gfs[IDX4(which_gf_0+1,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
aux_gfs[IDX4(which_gf_0+2,i0,i1,i2)] = \
+3.0*aux_gfs[IDX4(which_gf_0+2,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
-3.0*aux_gfs[IDX4(which_gf_0+2,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
+1.0*aux_gfs[IDX4(which_gf_0+2,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
if(FACEX0*aux_gfs[IDX4(which_gf_0+0,i0,i1,i2)] > 0.0) { \
aux_gfs[IDX4(which_gf_0+0,i0,i1,i2)] = 0.0; \
} \
if(FACEX1*aux_gfs[IDX4(which_gf_0+1,i0,i1,i2)] > 0.0) { \
aux_gfs[IDX4(which_gf_0+1,i0,i1,i2)] = 0.0; \
} \
if(FACEX2*aux_gfs[IDX4(which_gf_0+2,i0,i1,i2)] > 0.0) { \
aux_gfs[IDX4(which_gf_0+2,i0,i1,i2)] = 0.0; \
} \
*/
void apply_bcs(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *gfs,REAL *aux_gfs) {
// First, we apply extrapolation boundary conditions to AD
#pragma omp parallel for
for(int which_gf=0;which_gf<NUM_EVOL_GFS;which_gf++) {
if(which_gf < STILDED0GF || which_gf > STILDED2GF) {
int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
// After updating each face, adjust imin[] and imax[]
// to reflect the newly-updated face extents.
FACE_UPDATE(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
FACE_UPDATE(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
FACE_UPDATE(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
FACE_UPDATE(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
FACE_UPDATE(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE);
imin[2]--;
FACE_UPDATE(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE);
imax[2]++;
}
}
}
// Apply outflow/extrapolation boundary conditions to ValenciavU by passing VALENCIAVU0 as which_gf_0
for(int which_gf=VALENCIAVU0GF;which_gf<=VALENCIAVU2GF;which_gf++) {
int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
FACE_UPDATE_OUTFLOW(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
FACE_UPDATE_OUTFLOW(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE);
imin[2]--;
FACE_UPDATE_OUTFLOW(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE);
imax[2]++;
}
}
// Then, we apply copy boundary conditions to StildeD and psi6Phi
/*#pragma omp parallel for
for(int which_gf=3;which_gf<NUM_EVOL_GFS;which_gf++) {
int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
// After updating each face, adjust imin[] and imax[]
// to reflect the newly-updated face extents.
FACE_UPDATE_COPY(which_gf, imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL); imin[0]--;
FACE_UPDATE_COPY(which_gf, imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL); imax[0]++;
FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL); imin[1]--;
FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL); imax[1]++;
FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE); imin[2]--;
FACE_UPDATE_COPY(which_gf, imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE); imax[2]++;
}
}*/
}
// A supplement to the boundary conditions for debugging. This will overwrite data with exact conditions
void FACE_UPDATE_EXACT(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],
const int n, const REAL dt,REAL *out_gfs,REAL *aux_gfs,
const int i0min,const int i0max, const int i1min,const int i1max, const int i2min,const int i2max,
const int FACEX0,const int FACEX1,const int FACEX2) {
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
REAL xx0 = xx[0][i0]-n*dt;
REAL xx1 = xx[1][i1];
REAL xx2 = xx[2][i2];
if(xx0<=lbound) {
#include "../GiRaFFEfood_A_v_1D_tests_left.h"
}
else if (xx0<rbound) {
#include "../GiRaFFEfood_A_v_1D_tests_center.h"
}
else {
#include "../GiRaFFEfood_A_v_1D_tests_right.h"
}
out_gfs[IDX4(PSI6PHIGF, i0,i1,i2)] = 0.0;
}
}
void apply_bcs_EXACT(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],
const int n, const REAL dt,
REAL *out_gfs,REAL *aux_gfs) {
int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
// After updating each face, adjust imin[] and imax[]
// to reflect the newly-updated face extents.
// Right now, we only want to update the xmin and xmax faces with the exact data.
FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL);
imin[0]--;
FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL);
imax[0]++;
FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL);
imin[1]--;
FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL);
imax[1]++;
FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE);
imin[2]--;
FACE_UPDATE_EXACT(Nxx,Nxx_plus_2NGHOSTS,xx,n,dt,out_gfs,aux_gfs,imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE);
imax[2]++;
}
}
// A supplement to the boundary conditions for debugging. This will overwrite data with exact conditions
void FACE_UPDATE_EXACT_StildeD(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],
REAL *out_gfs,REAL *out_gfs_exact,
const int i0min,const int i0max, const int i1min,const int i1max, const int i2min,const int i2max,
const int FACEX0,const int FACEX1,const int FACEX2) {
// This is currently modified to calculate more exact boundary conditions for StildeD. Rename if it works.
/*for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++) {
#include "../GiRaFFEfood_HO_Stilde.h"
}*/
/*idx = IDX3(i0,i1,i2);
out_gfs[IDX4pt(STILDED0GF,idx)] = out_gfs_exact[IDX4pt(STILDED0GF,idx)];
out_gfs[IDX4pt(STILDED1GF,idx)] = out_gfs_exact[IDX4pt(STILDED1GF,idx)];
out_gfs[IDX4pt(STILDED2GF,idx)] = out_gfs_exact[IDX4pt(STILDED2GF,idx)];*/
}
void apply_bcs_EXACT_StildeD(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],REAL *xx[3],
REAL *out_gfs,REAL *out_gfs_exact) {
int imin[3] = { NGHOSTS, NGHOSTS, NGHOSTS };
int imax[3] = { Nxx_plus_2NGHOSTS[0]-NGHOSTS, Nxx_plus_2NGHOSTS[1]-NGHOSTS, Nxx_plus_2NGHOSTS[2]-NGHOSTS };
for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
// After updating each face, adjust imin[] and imax[]
// to reflect the newly-updated face extents.
// Right now, we only want to update the xmin and xmax faces with the exact data.
FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0]-1,imin[0], imin[1],imax[1], imin[2],imax[2], MINFACE,NUL,NUL);
imin[0]--;
FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imax[0],imax[0]+1, imin[1],imax[1], imin[2],imax[2], MAXFACE,NUL,NUL);
imax[0]++;
//FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imin[1]-1,imin[1], imin[2],imax[2], NUL,MINFACE,NUL);
imin[1]--;
//FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imax[1],imax[1]+1, imin[2],imax[2], NUL,MAXFACE,NUL);
imax[1]++;
//FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imin[1],imax[1], imin[2]-1,imin[2], NUL,NUL,MINFACE);
imin[2]--;
//FACE_UPDATE_EXACT_StildeD(Nxx,Nxx_plus_2NGHOSTS,xx,out_gfs,out_gfs_exact,imin[0],imax[0], imin[1],imax[1], imax[2],imax[2]+1, NUL,NUL,MAXFACE);
imax[2]++;
}
} |
nautil.c | /*****************************************************************************
* *
* Auxiliary source file for version 2.2 of nauty. *
* *
* Copyright (1984-2002) Brendan McKay. All rights reserved. *
* Subject to waivers and disclaimers in nauty.h. *
* *
* CHANGE HISTORY *
* 10-Nov-87 : final changes for version 1.2 *
* 5-Dec-87 : renamed to version 1.3 (no changes to this file) *
* 28-Sep-88 : renamed to version 1.4 (no changes to this file) *
* 23-Mar-89 : changes for version 1.5 : *
* - added procedure refine1() *
* - changed type of ptn from int* to nvector* in fmptn() *
* - declared level in breakout() *
* - changed char[] to char* in a few places *
* - minor rearrangement in bestcell() *
* 31-Mar-89 : - added procedure doref() *
* 5-Apr-89 : - changed MAKEEMPTY uses to EMPTYSET *
* 12-Apr-89 : - changed writeperm() and fmperm() to not use MARKing *
* 5-May-89 : - redefined MASH to gain about 8% efficiency *
* 18-Oct-90 : changes for version 1.6 : *
* - improved line breaking in writeperm() *
* 10-Nov-90 : - added dummy routine nautil_null() *
* 27-Aug-92 : changes for version 1.7 : *
* - made linelength <= 0 mean no line breaks *
* 5-Jun-93 : renamed to version 1.7+ (no changes to this file) *
* 18-Aug-93 : renamed to version 1.8 (no changes to this file) *
* 17-Sep-93 : renamed to version 1.9 (no changes to this file) *
* 29-Jun-95 : changes for version 1.10 : *
* - replaced loop in nextelement() to save reference past *
* end of array (thanks to Kevin Maylsiak) *
* 11-Jul-96 : changes for version 2.0 : *
* - added alloc_error() *
* - added dynamic allocation *
* 21-Oct-98 : use 077777 in place of INFINITY for CLEANUP() *
* 9-Jan-00 : added nautil_check() *
* 12-Feb-00 : did a little formatting of the code *
* 28-May-00 : added nautil_freedyn() *
* 16-Aug-00 : added OLDNAUTY behaviour *
* 16-Nov-00 : moved graph-specific things to naugraph.c *
* use function prototypes, remove UPROC, nvector *
* 22-Apr-01 : added code for compilation into Magma *
* removed nautil_null() *
* removed EXTDEFS and included labelorg *
* 21-Nov-01 : use NAUTYREQUIRED in nautil_check() *
* 26-Jun-02 : revised permset() to avoid fetch past the end of *
* the array (thanks to Jan Kieffer) *
* 17-Nov-03 : changed INFINITY to NAUTY_INFINITY *
* 14-Sep-04 : extended prototypes to recursive functions *
* *
*****************************************************************************/
#ifdef _OPENMP
#include <omp.h>
#endif
#define ONE_WORD_SETS
#include "nauty.h"
#ifdef NAUTY_IN_MAGMA
#include "io.e"
#endif
/* macros for hash-codes: */
#define MASH(l,i) ((((l) ^ 065435) + (i)) & 077777)
/* : expression whose long value depends only on long l and int/long i.
Anything goes, preferably non-commutative. */
#define CLEANUP(l) ((int)((l) % 077777))
/* : expression whose value depends on long l and is less than 077777
when converted to int then short. Anything goes. */
#if MAXM==1
#define M 1
#else
#define M m
#endif
#if !MAXN
DYNALLSTAT(permutation,workperm,workperm_sz);
#else
static permutation workperm[MAXN];
#endif
int labelorg = 0;
int findFirstOf(const set* x,unsigned len);
/* aproto: header new_nauty_protos.h */
/*****************************************************************************
* *
* nextelement(set1,m,pos) = the position of the first element in set set1 *
* which occupies a position greater than pos. If no such element exists, *
* the value is -1. pos can have any value less than n, including negative *
* values. *
* *
* GLOBALS ACCESSED: none *
* *
*****************************************************************************/
int
nextelement(set *set1, int m, int pos)
{
register setword setwd;
register int w, l;
#if MAXM==1
if (pos < 0) setwd = set1[0];
else setwd = set1[0] & BITMASK(pos);
if (setwd == 0) return -1;
else return FIRSTBIT(setwd);
#else
if (pos < 0)
{
w = 0;
setwd = set1[0];
}
else
{
w = SETWD(pos);
setwd = set1[w] & BITMASK(SETBT(pos));
}
// original BDM code here for reference.
for (;;)
{
if (setwd != 0) return TIMESWORDSIZE(w) + FIRSTBIT(setwd);
if (++w == m) return -1;
setwd = set1[w];
}
// correct, but slow!!
// if (setwd != 0) return TIMESWORDSIZE(w) + FIRSTBIT(setwd);
// ++w;
// l=m-w;
// if (l<=0 || (w+=findFirstOf(&set1[w],l))==m)
// return -1;
// else
// return TIMESWORDSIZE(w) + FIRSTBIT(set1[w]);
//
// int first_nonzero=m;
//
//
//#ifdef _OPENMP
// omp_lock_t critical_lock;
// omp_init_lock(&critical_lock);
//#pragma omp parallel firstprivate(w)
//#endif
// {
// int stride=1;
//#ifdef _OPENMP
// w+=omp_get_thread_num();
// stride=omp_get_num_threads();
//#endif
// for (++w; w<first_nonzero; w+=stride)
// {
// if (set1[w])
//#ifdef _OPENMP
// {
// // try to obtain a lock, testing exit condition
// while (!omp_test_lock(&critical_lock))
// {
//#pragma omp flush(first_nonzero)
// if (w>=first_nonzero)
// goto first_nonzero_found; //give up, we're beaten
// }
// if (w<first_nonzero)
// first_nonzero=w;
// omp_unset_lock(&critical_lock);
// }
//#pragma omp flush(first_nonzero)
// #else
// first_nonzero=w;
//#endif
// }
// first_nonzero_found:
// }
//
// if (first_nonzero == m)
// return -1;
// else
// return TIMESWORDSIZE(first_nonzero) + FIRSTBIT(set1[first_nonzero]);
#endif
}
/*****************************************************************************
* *
* permset(set1,set2,m,perm) defines set2 to be the set *
* {perm[i] | i in set1}. *
* *
* GLOBALS ACCESSED: bit<r>,leftbit<r> *
* *
*****************************************************************************/
void
permset(set *set1, set *set2, int m, permutation *perm)
{
register setword setw;
register int pos,w,b;
EMPTYSET(set2,m);
#if MAXM==1
setw = set1[0];
while (setw != 0)
{
TAKEBIT(b,setw);
pos = perm[b];
ADDELEMENT(set2,pos);
}
#else
for (w = 0; w < m; ++w)
{
setw = set1[w];
while (setw != 0)
{
TAKEBIT(b,setw);
pos = perm[TIMESWORDSIZE(w)+b];
ADDELEMENT(set2,pos);
}
}
#endif
}
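/* Example: with m=1, perm = [2,0,1] and set1 = {0,1},
   permset() yields set2 = {perm[0],perm[1]} = {2,0}. */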
/*****************************************************************************
* *
* putstring(f,s) writes the nul-terminated string s to file f. *
* *
*****************************************************************************/
void
putstring(FILE *f, char *s)
{
while (*s != '\0')
{
PUTC(*s,f);
++s;
}
}
/*****************************************************************************
* *
* itos(i,s) converts the int i to a nul-terminated decimal character *
* string s. The value returned is the number of characters excluding *
* the nul. *
* *
* GLOBALS ACCESSED: NONE *
* *
*****************************************************************************/
int
itos(int i, char *s)
{
register int digit,j,k;
register char c;
int ans;
if (i < 0)
{
k = 0;
i = -i;
j = 1;
s[0] = '-';
}
else
{
k = -1;
j = 0;
}
do
{
digit = i % 10;
i = i / 10;
s[++k] = digit + '0';
}
while (i);
s[k+1] = '\0';
ans = k + 1;
for (; j < k; ++j, --k)
{
c = s[j];
s[j] = s[k];
s[k] = c;
}
return ans;
}
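/* Example: itos(-45,s) stores "-45" in s and returns 3. */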
/*****************************************************************************
* *
* orbits represents a partition of {0,1,...,n-1}, by orbits[i] = the *
* smallest element in the same cell as i. orbjoin(orbits,perm,n) updates *
* the partition orbits to the join of its current value and the cycle *
* partition of perm. The function value returned is the new number of *
* cells. *
* *
* GLOBALS ACCESSED: NONE *
* *
*****************************************************************************/
int
orbjoin(int *orbits, permutation *perm, int n)
{
register int i,j1,j2;
for (i = 0; i < n; ++i)
{
j1 = orbits[i];
while (orbits[j1] != j1) j1 = orbits[j1];
j2 = orbits[perm[i]];
while (orbits[j2] != j2) j2 = orbits[j2];
if (j1 < j2) orbits[j2] = j1;
else if (j1 > j2) orbits[j1] = j2;
}
j1 = 0;
for (i = 0; i < n; ++i)
if ((orbits[i] = orbits[orbits[i]]) == i) ++j1;
return j1;
}
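/* Example: with n=4, orbits = [0,1,2,3] (all singleton cells) and
   perm = [1,0,3,2] (the product of transpositions (0 1)(2 3)),
   orbjoin() leaves orbits = [0,0,2,2] and returns 2 cells. */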
/*****************************************************************************
* *
* writeperm(f,perm,cartesian,linelength,n) writes the permutation perm to *
* the file f. The cartesian representation (i.e. perm itself) is used if *
* cartesian != FALSE; otherwise the cyclic representation is used. No *
* more than linelength characters (not counting '\n') are written on each *
* line, unless linelength is ridiculously small. linelength<=0 causes no *
* line breaks at all to be made. The global int labelorg is added to each *
* vertex number. *
* *
* GLOBALS ACCESSED: itos(),putstring() *
* *
*****************************************************************************/
void
writeperm(FILE *f, permutation *perm, boolean cartesian, int linelength, int n)
{
register int i,k,l,curlen,intlen;
char s[30];
#if !MAXN
DYNALLOC1(permutation,workperm,workperm_sz,n,"writeperm");
#endif
/* CONDNL(x) writes end-of-line and 3 spaces if x characters
won't fit on the current line. */
#define CONDNL(x) if (linelength>0 && curlen+(x)>linelength)\
{putstring(f,"\n ");curlen=3;}
curlen = 0;
if (cartesian)
{
for (i = 0; i < n; ++i)
{
intlen = itos(perm[i]+labelorg,s);
CONDNL(intlen+1);
PUTC(' ',f);
putstring(f,s);
curlen += intlen + 1;
}
PUTC('\n',f);
}
else
{
for (i = n; --i >= 0;) workperm[i] = 0;
for (i = 0; i < n; ++i)
{
if (workperm[i] == 0 && perm[i] != i)
{
l = i;
intlen = itos(l+labelorg,s);
if (curlen > 3) CONDNL(2*intlen+4);
PUTC('(',f);
do
{
putstring(f,s);
curlen += intlen + 1;
k = l;
l = perm[l];
workperm[k] = 1;
if (l != i)
{
intlen = itos(l+labelorg,s);
CONDNL(intlen+2);
PUTC(' ',f);
}
}
while (l != i);
PUTC(')',f);
++curlen;
}
}
if (curlen == 0) putstring(f,"(1)\n");
else PUTC('\n',f);
}
}
/*****************************************************************************
* *
* fmperm(perm,fix,mcr,m,n) uses perm to construct fix and mcr. fix *
* contains those points are fixed by perm, while mcr contains the set of *
* those points which are least in their orbits. *
* *
* GLOBALS ACCESSED: bit<r> *
* *
*****************************************************************************/
void
fmperm(permutation *perm, set *fix, set *mcr, int m, int n)
{
register int i,k,l;
#if !MAXN
DYNALLOC1(permutation,workperm,workperm_sz,n,"fmperm");
#endif
EMPTYSET(fix,m);
EMPTYSET(mcr,m);
for (i = n; --i >= 0;) workperm[i] = 0;
for (i = 0; i < n; ++i)
if (perm[i] == i)
{
ADDELEMENT(fix,i);
ADDELEMENT(mcr,i);
}
else if (workperm[i] == 0)
{
l = i;
do
{
k = l;
l = perm[l];
workperm[k] = 1;
}
while (l != i);
ADDELEMENT(mcr,i);
}
}
/*****************************************************************************
* *
* fmptn(lab,ptn,level,fix,mcr,m,n) uses the partition at the specified *
* level in the partition nest (lab,ptn) to make sets fix and mcr. fix *
* represents the points in trivial cells of the partition, while mcr *
* represents those points which are least in their cells. *
* *
* GLOBALS ACCESSED: bit<r> *
* *
*****************************************************************************/
void
fmptn(int *lab, int *ptn, int level, set *fix, set *mcr, int m, int n)
{
register int i,lmin;
EMPTYSET(fix,m);
EMPTYSET(mcr,m);
for (i = 0; i < n; ++i)
if (ptn[i] <= level)
{
ADDELEMENT(fix,lab[i]);
ADDELEMENT(mcr,lab[i]);
}
else
{
lmin = lab[i];
do
if (lab[++i] < lmin) lmin = lab[i];
while (ptn[i] > level);
ADDELEMENT(mcr,lmin);
}
}
/*****************************************************************************
* *
* doref(g,lab,ptn,level,numcells,qinvar,invar,active,code,refproc, *
* invarproc,mininvarlev,maxinvarlev,invararg,digraph,m,n) *
* is used to perform a refinement on the partition at the given level in *
* (lab,ptn). The number of cells is *numcells both for input and output. *
* The input active is the active set for input to the refinement procedure *
* (*refproc)(), which must have the argument list of refine(). *
* active may be arbitrarily changed. invar is used for working storage. *
* First, (*refproc)() is called. Then, if invarproc!=NULL and *
* |mininvarlev| <= level <= |maxinvarlev|, the routine (*invarproc)() is *
* used to compute a vertex-invariant which may refine the partition *
* further. If it does, (*refproc)() is called again, using an active set *
* containing all but the first fragment of each old cell. Unless g is a *
* digraph, this guarantees that the final partition is equitable. The *
* arguments invararg and digraph are passed to (*invarproc)() *
* uninterpreted. The output argument code is a composite of the codes *
* from all the calls to (*refproc)(). The output argument qinvar is set *
* to 0 if (*invarproc)() is not applied, 1 if it is applied but fails to *
* refine the partition, and 2 if it succeeds. *
* See the file nautinv.c for a further discussion of vertex-invariants. *
* Note that the dreadnaut I command generates a call to this procedure *
* with level = mininvarlevel = maxinvarlevel = 0. *
* *
*****************************************************************************/
void
doref(graph *g, int *lab, int *ptn, int level, int *numcells,
int *qinvar, permutation *invar, set *active, int *code,
void (*refproc)(graph*,int*,int*,int,int*,permutation*,set*,int*,int,int),
void (*invarproc)(graph*,int*,int*,int,int,int,permutation*,
int,boolean,int,int),
int mininvarlev, int maxinvarlev, int invararg,
boolean digraph, int m, int n)
{
register int j,h;
register permutation pw;
int iw;
int i,cell1,cell2,nc,tvpos,minlev,maxlev;
long longcode;
boolean same;
#if !MAXN
DYNALLOC1(permutation,workperm,workperm_sz,n,"doref");
#endif
if ((tvpos = nextelement(active,M,-1)) < 0) tvpos = 0;
(*refproc)(g,lab,ptn,level,numcells,invar,active,code,M,n);
minlev = (mininvarlev < 0 ? -mininvarlev : mininvarlev);
maxlev = (maxinvarlev < 0 ? -maxinvarlev : maxinvarlev);
if (invarproc != NULL && *numcells < n
&& level >= minlev && level <= maxlev)
{
(*invarproc)(g,lab,ptn,level,*numcells,tvpos,invar,invararg,
digraph,M,n);
EMPTYSET(active,m);
for (i = n; --i >= 0;) workperm[i] = invar[lab[i]];
nc = *numcells;
for (cell1 = 0; cell1 < n; cell1 = cell2 + 1)
{
pw = workperm[cell1];
same = TRUE;
for (cell2 = cell1; ptn[cell2] > level; ++cell2)
if (workperm[cell2+1] != pw) same = FALSE;
if (same) continue;
j = (cell2 - cell1 + 1) / 3;
h = 1;
do
h = 3 * h + 1;
while (h < j);
do /* shell sort */
{
for (i = cell1 + h; i <= cell2; ++i)
{
iw = lab[i];
pw = workperm[i];
for (j = i; workperm[j-h] > pw; )
{
workperm[j] = workperm[j-h];
lab[j] = lab[j-h];
if ((j -= h) < cell1 + h) break;
}
workperm[j] = pw;
lab[j] = iw;
}
h /= 3;
}
while (h > 0);
for (i = cell1 + 1; i <= cell2; ++i)
if (workperm[i] != workperm[i-1])
{
ptn[i-1] = level;
++*numcells;
ADDELEMENT(active,i);
}
}
if (*numcells > nc)
{
*qinvar = 2;
longcode = *code;
(*refproc)(g,lab,ptn,level,numcells,invar,active,code,M,n);
longcode = MASH(longcode,*code);
*code = CLEANUP(longcode);
}
else
*qinvar = 1;
}
else
*qinvar = 0;
}
/*****************************************************************************
* *
* targetcell(g,lab,ptn,level,numcells,tcell,tcellsize,&cellpos,tc_level, *
* hint,goodcell,m,n) *
* examines the partition at the specified level in the partition nest *
* (lab,ptn) and finds a non-trivial cell (if none, the first cell). *
* If hint >= 0 and there is a non-trivial cell starting at position hint *
* in lab, that cell is chosen. *
* Else, If level <= tc_level, *goodcell is called to choose a cell. *
* Else, the first non-trivial cell is chosen. *
* When a cell is chosen, tcell is set to its contents, *tcellsize to its *
* size, and cellpos to its starting position in lab. *
* *
* GLOBALS ACCESSED: bit<r> *
* *
*****************************************************************************/
void
targetcell(graph *g, int *lab, int *ptn, int level, int numcells,
set *tcell, int *tcellsize, int *cellpos, int tc_level,
int hint, int (*goodcell)(graph*,int*,int*,int,int,int,int),
int m, int n)
{
register int i,j,k;
if (hint >= 0 && ptn[hint] > level &&
(hint == 0 || ptn[hint-1] <= level))
i = hint;
else if (level <= tc_level && goodcell != NULL)
i = (*goodcell)(g,lab,ptn,level,tc_level,m,n);
else
for (i = 0; i < n && ptn[i] <= level; ++i) {}
if (i == n)
i = j = 0;
else
for (j = i + 1; ptn[j] > level; ++j) {}
*tcellsize = j - i + 1;
EMPTYSET(tcell,m);
for (k = i; k <= j; ++k) ADDELEMENT(tcell,lab[k]);
*cellpos = i;
}
/*****************************************************************************
* *
* shortprune(set1,set2,m) ANDs the contents of set set2 into set set1. *
* *
* GLOBALS ACCESSED: NONE *
* *
*****************************************************************************/
void
shortprune(set *set1, set *set2, int m)
{
register int i;
for (i = 0; i < M; ++i) INTERSECT(set1[i],set2[i]);
}
/*****************************************************************************
* *
* breakout(lab,ptn,level,tc,tv,active,m) operates on the partition at *
* the specified level in the partition nest (lab,ptn). It finds the *
* element tv, which is in the cell C starting at index tc in lab (it had *
* better be) and splits C in the two cells {tv} and C\{tv}, in that order. *
* It also sets the set active to contain just the element tc. *
* *
* GLOBALS ACCESSED: bit<r> *
* *
*****************************************************************************/
void
breakout(int *lab, int *ptn, int level, int tc, int tv,
set *active, int m)
{
register int i,prev,next;
EMPTYSET(active,m);
ADDELEMENT(active,tc);
i = tc;
prev = tv;
do
{
next = lab[i];
lab[i++] = prev;
prev = next;
}
while (prev != tv);
ptn[tc] = level;
}
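/* Example: if lab = [5,2,7] is a single cell starting at tc=0 at this level,
   breakout(lab,ptn,level,0,7,active,m) reorders lab to [7,5,2], sets
   ptn[0] = level to split off the singleton {7}, and sets active = {0}. */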
/*****************************************************************************
* *
* longprune(tcell,fix,bottom,top,m) removes zero or more elements of the set *
* tcell. It is assumed that addresses bottom through top-1 contain *
* contiguous pairs of sets (f1,m1),(f2,m2), ... . tcell is intersected *
* with each mi such that fi is a subset of fix. *
* *
* GLOBALS ACCESSED: NONE *
* *
*****************************************************************************/
void
longprune(set *tcell, set *fix, set *bottom, set *top, int m)
{
register int i;
while (bottom < top)
{
for (i = 0; i < M; ++i)
if (NOTSUBSET(fix[i],bottom[i])) break;
bottom += M;
if (i == M)
for (i = 0; i < M; ++i) INTERSECT(tcell[i],bottom[i]);
bottom += M;
}
}
/*****************************************************************************
* *
* nautil_check() checks that this file is compiled compatibly with the *
* given parameters. If not, call exit(1). *
* *
*****************************************************************************/
void
nautil_check(int wordsize, int m, int n, int version)
{
if (wordsize != WORDSIZE)
{
fprintf(ERRFILE,"Error: WORDSIZE mismatch in nautil.c\n");
exit(1);
}
#if MAXN
if (m > MAXM)
{
fprintf(ERRFILE,"Error: MAXM inadequate in nautil.c\n");
exit(1);
}
if (n > MAXN)
{
fprintf(ERRFILE,"Error: MAXN inadequate in nautil.c\n");
exit(1);
}
#endif
#ifdef BIGNAUTY
if ((version & 1) == 0)
{
fprintf(ERRFILE,"Error: BIGNAUTY mismatch in nautil.c\n");
exit(1);
}
#else
if ((version & 1) == 1)
{
fprintf(ERRFILE,"Error: BIGNAUTY mismatch in nautil.c\n");
exit(1);
}
#endif
if (version < NAUTYREQUIRED)
{
fprintf(ERRFILE,"Error: nautil.c version mismatch\n");
exit(1);
}
}
/*****************************************************************************
* *
* alloc_error() writes a message and exits. Used by DYNALLOC? macros. *
* *
*****************************************************************************/
void
alloc_error(char *s)
{
fprintf(ERRFILE,"Dynamic allocation failed: %s\n",s);
exit(2);
}
/*****************************************************************************
* *
* nautil_freedyn() - free the dynamic memory in this module *
* *
*****************************************************************************/
void
nautil_freedyn(void)
{
#if !MAXN
DYNFREE(workperm,workperm_sz);
#endif
}
|
cryptsha512_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* based on rawSHA256_fmt.c code and Drepper's spec at
* http://www.akkadia.org/drepper/SHA-crypt.txt
*
* This software is Copyright (c) 2012 magnum, and it is hereby released to the
* general public under the following terms: Redistribution and use in source
* and binary forms, with or without modification, are permitted.
*
* See code/comments in cryptsha256 for how and why this is being done. NOTE,
 * we could limit ourselves to 15 byte passwords, and then only need 1 limb
 * SHA512 SIMD logic. If we allow 2 limb logic then 79 byte passwords are max.
 * This is better than cryptsha256, where if we only allowed 1 limb, then only
 * 3 byte passwords would have been max, and even at 2 limbs, 35 byte passwords
* are the longest we can do.
*
* Porting to SSE2, May 2015, JimF. A little harder than some, since we have to
* group and rearrange passwords based upon length. We must only run passwords
* of a specific block group size in 1 SSE_COEF_SHA512 bundle. If we later do
* PARA_SHA512, then each bundle of SSE_COEF_SHA512*PARA_SHA512 will have to be
* made up of passwords of same block group size.
*
* Here are the block sizes per password length. To be equal group size, all
* numbers for 2 passwords must be equal all the way across. So, password
* lengths of 0, 1, ... 15 are 1 group. 16..23 are another group. 24..31 are
* yet another, etc. There are 5 'groups' of lengths.
*
* Here is the raw block length data. Only first and last length for the group has been kept.
Len: cp pspc cspp ppc cpp psc csp pc
0 : 1 1 1 1 1 1 1 1
15 : 1 1 1 1 1 1 1 1
16 : 1 2 2 1 1 1 1 1
23 : 1 2 2 1 1 1 1 1
24 : 1 2 2 2 2 1 1 1
31 : 1 2 2 2 2 1 1 1
32 : 1 2 2 2 2 2 2 1
47 : 1 2 2 2 2 2 2 1
48 : 2 2 2 2 2 2 2 2
79 : 2 2 2 2 2 2 2 2
Source to make above table (made up to 90,but over 79 is 3 limbs)
#include <stdio.h>
int c=64, s=16;
int S(int sz) {
if (sz<=111) return 1;
else if (sz <= 111+128) return 2;
else return 3;
}
void proc(int p) {
int cp=p+c;
printf("%-2d : %d %d %d %d %d %d %d %d\n",
p,S(cp),S(cp+s+p),S(cp+s+p),S(cp+p),S(cp+p),S(cp+s),S(cp+s),S(cp));
}
void main(int argc, char **argv) {
int i;
if (argc==2) s=atoi(argv[1]);
printf ("Len: cp pspc cspp ppc cpp psc csp pc (saltlen=%d)\n",s);
for (i = 0; i < 90; ++i)
proc(i);
}
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_cryptsha512;
#elif FMT_REGISTERS_H
john_register_one(&fmt_cryptsha512);
#else
#include "arch.h"
//#undef SIMD_COEF_64
#include "sha2.h"
#define _GNU_SOURCE 1
#include <string.h>
#include "params.h"
#include "common.h"
#include "formats.h"
#include "johnswap.h"
#include "simd-intrinsics.h"
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 16
#endif
#include <omp.h>
#endif
#include "memdbg.h"
// NOTE, in SSE mode, even if NOT in OMP, we may need to scale, quite a bit, due to needing
// to 'group' passwords differently, so that we have lengths which 'share' the same number
// of crypt block counts for each 'type'. We may want to scale as much as 128 or so, just
// to try to have better saturation. If we only had 8 passwords given to us, and they were
// one each of these lengths: 3 7 8 12 13 14 15 21, in theory, we could do this
// with only 2 SSE calls (SIMD_COEF_32==4 for SHA256). However, length 3 has to run by itself,
// length 7 by itself, 8 by itself, and the rest can run together, but there are 5 of them,
// so it takes two runs. So, instead of 2 runs, we have to do 5 runs. Not very efficient.
// However, if we have a lot more passwords to work with, we can re-arrange them, to run
// them in groups that all 'fit' together, and do so until we exhaust all from a given length
// range, then do all in the next range. Thus, until we get to the last set within a length
// range, we are doing a fully packed SSE run, and having a LOT less wasted space. This will
// get even more interesting, when we start doing OMP, but it should just be the same principle:
// preload more passwords, and group them, then run the OMP threads over a single length, then
// go to the next length, until done, trying to keep each thread running, and keeping each block
// of SSE data full, until the last in a range. We probably can simply build all the rearrangements,
// then let the threads go on ALL data, without caring about the length, since each thread will only
// be working on passwords in a single MMX buffer that all match, at any given moment.
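// Illustrative example of the grouping (assuming a 16 byte salt and the SSE2
// case SIMD_COEF_64==2): per the table in the header comment, lengths 0..15
// form one group, 16..23 the next, then 24..31, 32..47 and 48..79. So
// candidate lengths {3,7,8,12} may share SIMD bundles, while a length-21
// candidate must be bundled with other 16..23 byte candidates, because every
// password in a bundle must use the same number of SHA512 blocks for all 8
// buffer types.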
#ifdef SIMD_COEF_64
#ifdef _OPENMP
#define SIMD_COEF_SCALE (32/SIMD_COEF_64)
#else
#define SIMD_COEF_SCALE (64/SIMD_COEF_64)
#endif
#else
#define SIMD_COEF_SCALE 1
#endif
#define FORMAT_LABEL "sha512crypt"
#ifdef SIMD_COEF_64
#define ALGORITHM_NAME SHA512_ALGORITHM_NAME
#else
#if ARCH_BITS >= 64
#define ALGORITHM_NAME "64/" ARCH_BITS_STR " " SHA2_LIB
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
#endif
// 79 is max length we can do in 2 SIMD limbs, so just make it 79 always.
#define PLAINTEXT_LENGTH 79
#define BINARY_ALIGN 4
#define SALT_SIZE sizeof(struct saltstruct)
#define SALT_ALIGN 4
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
// these MUST be defined prior to loading cryptsha512_valid.h
#define BINARY_SIZE 64
#define SALT_LENGTH 16
#define CIPHERTEXT_LENGTH 86
#define __CRYPTSHA512_CREATE_PROPER_TESTS_ARRAY__
#include "cryptsha512_common.h"
#define BLKS MAX_KEYS_PER_CRYPT
/* This structure is 'pre-loaded' with the keyspace of all possible crypts which */
/* will be performed WITHIN the inner loop. There are 8 possible buffers that */
/* are used. They are cp, pspc, cspp, ppc, cpp, psc, csp, and pc, where p stands */
/* for the 'hash' built from the password (and it is the same length as the */
/* password), s stands for the hash built from the salt (same size as salt), and */
/* c stands for the crypt results from the prior loop. There are 8 possible */
/* buffer layouts listed, but they fall into a pattern that is 42 long (2*3*7) */
/* this structure encapsulates this. we build this buffer, after computing the */
/* s hash, the p hash, and the starting c values. Then, within the inner loop, */
/* we simply spin through this structure, calling the SHA512 code to do the work. */
/* NOTE, most of the time, there will be 1 block and 2 block crypts. As the */
/* password length grows, the more 2 block crypts there are, thus slower.   */
/**/
/* for SSE only, but 'could' be done for sha2.c code (jtr sha2) */
/* This keyspace was changed, to be put into BE at the start, and then we never */
/* do any swapping, but keep it in BE format from that point on. To do this, we */
/* changed the pointers to be a pointer to the start of the block, AND an offset */
/* for SSE, we need a pointer to the start of the block[0], and the offset. The */
/* index needed will be known in the crypt_all. This means we need something */
/* similar to out GET_POS macros, but also for oSSL formats. */
/* To do this, we have to use the JtR sha2.c functions, since there is this func: */
/* sha512_hash_block(&CTX, data, int perform_endian_swap). So if we set the last */
/* param to 0, we can call this function, and it will avoid the byte swapping */
typedef struct cryptloopstruct_t {
	unsigned char buf[8*2*128*BLKS]; // room for the 8 unique 2-block buffers (8*2*128); originally sized for all 42 (42*2*128), but the repeats point back into these 8.
	// The cryptstructs now live on the stack within the crypt for loop, so we avoid allocation
	// and avoid a single static variable or a static array.
unsigned char *bufs[BLKS][42]; // points to the start of each 2 block buffer.
#ifdef SIMD_COEF_64
int offs[BLKS][42];
#endif
unsigned char *cptr[BLKS][42]; // points to where we copy the crypt pointer for next round.
// Round 0 points to somewhere in round 1's buffer, etc.
	int datlen[42]; // bytes to crypt this round: 128 for a 1-block crypt, 256 for 2 blocks. Some rounds for shorter passwords take only 1 crypt block.
// NOTE, datlen could be changed to a number, and then we could do > 2 block crypts. Would take a little
// more memory (and longer PW's certainly DO take more time), but it should work fine. It may be an issue
// especially when doing OMP, that the memory footprint of this 'hot' inner loop simply gets too big, and
// things slow down. For now, we are limiting ourselves to 35 byte password, which fits into 2 SHA512 buffers
} cryptloopstruct;
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* these 2 values are used in setup of the cryptloopstruct, AND to do our SHA512_Init() calls, in the inner loop */
static const unsigned char padding[256] = { 0x80, 0 /* 0,0,0,0.... */ };
#if !defined(JTR_INC_COMMON_CRYPTO_SHA2) && !defined (SIMD_COEF_64)
static const uint64_t ctx_init[8] =
{0x6A09E667F3BCC908ULL,0xBB67AE8584CAA73BULL,0x3C6EF372FE94F82BULL,0xA54FF53A5F1D36F1ULL,0x510E527FADE682D1ULL,0x9B05688C2B3E6C1FULL,0x1F83D9ABFB41BD6BULL,0x5BE0CD19137E2179ULL};
#endif
static struct saltstruct {
unsigned int len;
unsigned int rounds;
unsigned char salt[SALT_LENGTH];
} *cur_salt;
static void init(struct fmt_main *self)
{
int omp_t = 1;
int max_crypts;
#ifdef _OPENMP
omp_t = omp_get_max_threads();
omp_t *= OMP_SCALE;
#endif
max_crypts = SIMD_COEF_SCALE * omp_t * MAX_KEYS_PER_CRYPT;
self->params.max_keys_per_crypt = max_crypts;
// we allocate 1 more than needed, and use that 'extra' value as a zero
// length PW to fill in the tail groups in MMX mode.
saved_len = mem_calloc(1 + max_crypts, sizeof(*saved_len));
saved_key = mem_calloc(1 + max_crypts, sizeof(*saved_key));
crypt_out = mem_calloc(1 + max_crypts, sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
MEM_FREE(saved_len);
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_len[index] = len;
if (len > PLAINTEXT_LENGTH)
len = saved_len[index] = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, len);
saved_key[index][len] = 0;
}
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
/*
These are the 8 types of buffers this algorithm uses:
cp
pspc
cspp
ppc
cpp
psc
csp
pc
*/
static void LoadCryptStruct(cryptloopstruct *crypt_struct, int index, int idx, char *p_bytes, char *s_bytes) {
unsigned len_pc, len_ppsc, len_ppc, len_psc; // length of 'data'
unsigned tot_pc, tot_ppsc, tot_ppc, tot_psc; // length of entire block to crypt (128 or 256)
unsigned off_pc, off_pspc, off_ppc, off_psc; // offset to the crypt ptr for these 4 'types'.
unsigned dlen_pc, dlen_ppsc, dlen_ppc, dlen_psc; // is this 1 or 2 block (or actual len for CommonCrypto, since it uses SHA512_Final()
unsigned plen=saved_len[index];
unsigned char *cp = crypt_struct->buf;
cryptloopstruct *pstr = crypt_struct;
#ifdef SIMD_COEF_64
// in SSE mode, we FORCE every buffer to be 2 blocks, even if it COULD fit into 1.
// Then we simply use the 2 block SSE code.
unsigned char *next_cp;
cp += idx*2*128;
#endif
len_pc = plen + BINARY_SIZE;
len_ppsc = (plen<<1) + cur_salt->len + BINARY_SIZE;
len_ppc = (plen<<1) + BINARY_SIZE;
len_psc = plen + cur_salt->len + BINARY_SIZE;
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
if (len_pc <=111) tot_pc =128; else tot_pc =256;
if (len_ppsc<=111) tot_ppsc=128; else tot_ppsc=256;
if (len_ppc <=111) tot_ppc =128; else tot_ppc =256;
if (len_psc <=111) tot_psc =128; else tot_psc =256;
dlen_pc =len_pc;
dlen_ppsc=len_ppsc;
dlen_ppc =len_ppc;
dlen_psc =len_psc;
#else
if (len_pc <=111) {tot_pc =128; dlen_pc =128;}else{tot_pc =256; dlen_pc =256; }
if (len_ppsc<=111) {tot_ppsc=128; dlen_ppsc=128;}else{tot_ppsc=256; dlen_ppsc=256; }
if (len_ppc <=111) {tot_ppc =128; dlen_ppc =128;}else{tot_ppc =256; dlen_ppc =256; }
if (len_psc <=111) {tot_psc =128; dlen_psc =128;}else{tot_psc =256; dlen_psc =256; }
#endif
off_pc = len_pc - BINARY_SIZE;
off_pspc = len_ppsc - BINARY_SIZE;
off_ppc = len_ppc - BINARY_SIZE;
off_psc = len_psc - BINARY_SIZE;
// Adjust cp for idx;
#ifdef SIMD_COEF_64
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[0] is a cp (First of this type)
pstr->bufs[idx][0] = pstr->cptr[idx][41] = cp;
	// For the first element only, we DO copy in the c value.
memcpy(cp, crypt_out[index], BINARY_SIZE); cp += BINARY_SIZE;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[0] = dlen_pc;
memcpy(cp, padding, tot_pc-2-len_pc); cp += (tot_pc-len_pc);
pstr->bufs[idx][0][tot_pc-2] = (len_pc<<3)>>8;
pstr->bufs[idx][0][tot_pc-1] = (len_pc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[1] is a pspc (First of this type)
pstr->bufs[idx][1] = cp;
pstr->cptr[idx][0] = cp + off_pspc;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
memcpy(cp, p_bytes, plen); cp += (plen+BINARY_SIZE);
if (!idx) pstr->datlen[1] = dlen_ppsc;
memcpy(cp, padding, tot_ppsc-2-len_ppsc); cp += (tot_ppsc-len_ppsc);
pstr->bufs[idx][1][tot_ppsc-2] = (len_ppsc<<3)>>8;
pstr->bufs[idx][1][tot_ppsc-1] = (len_ppsc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[2] is a cspp (First of this type)
pstr->bufs[idx][2] = pstr->cptr[idx][1] = cp;
cp += BINARY_SIZE;
memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[2] = dlen_ppsc;
memcpy(cp, padding, tot_ppsc-2-len_ppsc); cp += (tot_ppsc-len_ppsc);
pstr->bufs[idx][2][tot_ppsc-2] = (len_ppsc<<3)>>8;
pstr->bufs[idx][2][tot_ppsc-1] = (len_ppsc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[3] is a ppc (First of this type)
pstr->bufs[idx][3] = cp;
pstr->cptr[idx][2] = cp + off_ppc;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, p_bytes, plen); cp +=(plen+BINARY_SIZE);
if (!idx) pstr->datlen[3] = dlen_ppc;
memcpy(cp, padding, tot_ppc-2-len_ppc); cp += (tot_ppc-len_ppc);
pstr->bufs[idx][3][tot_ppc-2] = (len_ppc<<3)>>8;
pstr->bufs[idx][3][tot_ppc-1] = (len_ppc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[4] is a cspp (from 2)
pstr->bufs[idx][4] = pstr->cptr[idx][3] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[4] = dlen_ppsc;
// pstr->buf[5] is a pspc (from [1])
pstr->bufs[idx][5] = pstr->bufs[idx][1]; pstr->cptr[idx][4] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[5] = dlen_ppsc;
// pstr->buf[6] is a cpp (First of this type)
pstr->bufs[idx][6] = pstr->cptr[idx][5] = cp;
cp += BINARY_SIZE;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[6] = dlen_ppc;
memcpy(cp, padding, tot_ppc-2-len_ppc); cp += (tot_ppc-len_ppc);
pstr->bufs[idx][6][tot_ppc-2] = (len_ppc<<3)>>8;
pstr->bufs[idx][6][tot_ppc-1] = (len_ppc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[07] psc (First of this type)
pstr->bufs[idx][7] = cp;
pstr->cptr[idx][6] = cp + off_psc;
memcpy(cp, p_bytes, plen); cp += plen;
memcpy(cp, s_bytes, cur_salt->len); cp += (cur_salt->len+BINARY_SIZE);
if (!idx) pstr->datlen[7] = dlen_psc;
memcpy(cp, padding, tot_psc-2-len_psc); cp += (tot_psc-len_psc);
pstr->bufs[idx][7][tot_psc-2] = (len_psc<<3)>>8;
pstr->bufs[idx][7][tot_psc-1] = (len_psc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[08] cspp (from 2)
pstr->bufs[idx][8] = pstr->cptr[idx][7] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[8] = dlen_ppsc;
// pstr->buf[09] ppc (from 3)
pstr->bufs[idx][9] = pstr->bufs[idx][3]; pstr->cptr[idx][8] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[9] = dlen_ppc;
// pstr->buf[10] cspp (from 2)
pstr->bufs[idx][10] = pstr->cptr[idx][9] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[10] = dlen_ppsc;
// pstr->buf[11] pspc (from 1)
pstr->bufs[idx][11] = pstr->bufs[idx][1]; pstr->cptr[idx][10] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[11] = dlen_ppsc;
// pstr->buf[12] cpp (from 6)
pstr->bufs[idx][12] = pstr->cptr[idx][11] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[12] = dlen_ppc;
// pstr->buf[13] pspc (from 1)
pstr->bufs[idx][13] = pstr->bufs[idx][1]; pstr->cptr[idx][12] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[13] = dlen_ppsc;
// pstr->buf[14] csp (First of this type)
pstr->bufs[idx][14] = pstr->cptr[idx][13] = cp;
cp += BINARY_SIZE;
memcpy(cp, s_bytes, cur_salt->len); cp += cur_salt->len;
memcpy(cp, p_bytes, plen); cp += plen;
if (!idx) pstr->datlen[14] = dlen_psc;
memcpy(cp, padding, tot_psc-2-len_psc); cp += (tot_psc-len_psc);
pstr->bufs[idx][14][tot_psc-2] = (len_psc<<3)>>8;
pstr->bufs[idx][14][tot_psc-1] = (len_psc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[15] ppc (from 3)
pstr->bufs[idx][15] = pstr->bufs[idx][3]; pstr->cptr[idx][14] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[15] = dlen_ppc;
// pstr->buf[16] cspp (from 2)
pstr->bufs[idx][16] = pstr->cptr[idx][15] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[16] = dlen_ppsc;
// pstr->buf[17] pspc (from 1)
pstr->bufs[idx][17] = pstr->bufs[idx][1]; pstr->cptr[idx][16] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[17] = dlen_ppsc;
// pstr->buf[18] cpp (from 6)
pstr->bufs[idx][18] = pstr->cptr[idx][17] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[18] = dlen_ppc;
// pstr->buf[19] pspc (from 1)
pstr->bufs[idx][19] = pstr->bufs[idx][1]; pstr->cptr[idx][18] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[19] = dlen_ppsc;
// pstr->buf[20] cspp (from 2)
pstr->bufs[idx][20] = pstr->cptr[idx][19] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[20] = dlen_ppsc;
// pstr->buf[21] pc (First of this type)
pstr->bufs[idx][21] = cp;
pstr->cptr[idx][20] = cp + off_pc;
memcpy(cp, p_bytes, plen); cp += (plen+BINARY_SIZE);
if (!idx) pstr->datlen[21] = dlen_pc;
	memcpy(cp, padding, tot_pc-2-len_pc);
pstr->bufs[idx][21][tot_pc-2] = (len_pc<<3)>>8;
pstr->bufs[idx][21][tot_pc-1] = (len_pc<<3)&0xFF;
#ifdef SIMD_COEF_64
cp = next_cp;
next_cp = cp + (2*128*BLKS);
#endif
// pstr->buf[22] cspp (from 2)
pstr->bufs[idx][22] = pstr->cptr[idx][21] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[22] = dlen_ppsc;
// pstr->buf[23] pspc (from 1)
pstr->bufs[idx][23] = pstr->bufs[idx][1]; pstr->cptr[idx][22] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[23] = dlen_ppsc;
// pstr->buf[24] cpp (from 6)
pstr->bufs[idx][24] = pstr->cptr[idx][23] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[24] = dlen_ppc;
// pstr->buf[25] pspc (from 1)
pstr->bufs[idx][25] = pstr->bufs[idx][1]; pstr->cptr[idx][24] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[25] = dlen_ppsc;
// pstr->buf[26] cspp (from 2)
pstr->bufs[idx][26] = pstr->cptr[idx][25] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[26] = dlen_ppsc;
// pstr->buf[27] ppc (from 3)
pstr->bufs[idx][27] = pstr->bufs[idx][3]; pstr->cptr[idx][26] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[27] = dlen_ppc;
// pstr->buf[28] csp (from 14)
pstr->bufs[idx][28] = pstr->cptr[idx][27] = pstr->bufs[idx][14];
if (!idx) pstr->datlen[28] = dlen_psc;
// pstr->buf[29] pspc (from 1)
pstr->bufs[idx][29] = pstr->bufs[idx][1]; pstr->cptr[idx][28] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[29] = dlen_ppsc;
// pstr->buf[30] cpp (from 6)
pstr->bufs[idx][30] = pstr->cptr[idx][29] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[30] = dlen_ppc;
// pstr->buf[31] pspc (from 1)
pstr->bufs[idx][31] = pstr->bufs[idx][1]; pstr->cptr[idx][30] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[31] = dlen_ppsc;
// pstr->buf[32] cspp (from 2)
pstr->bufs[idx][32] = pstr->cptr[idx][31] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[32] = dlen_ppsc;
// pstr->buf[33] ppc (from 3)
pstr->bufs[idx][33] = pstr->bufs[idx][3]; pstr->cptr[idx][32] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[33] = dlen_ppc;
// pstr->buf[34] cspp (from 2)
pstr->bufs[idx][34] = pstr->cptr[idx][33] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[34] = dlen_ppsc;
// pstr->buf[35] psc (from 7)
pstr->bufs[idx][35] = pstr->bufs[idx][7]; pstr->cptr[idx][34] = pstr->cptr[idx][6];
if (!idx) pstr->datlen[35] = dlen_psc;
// pstr->buf[36] cpp (from 6)
pstr->bufs[idx][36] = pstr->cptr[idx][35] = pstr->bufs[idx][6];
if (!idx) pstr->datlen[36] = dlen_ppc;
// pstr->buf[37] pspc (from 1)
pstr->bufs[idx][37] = pstr->bufs[idx][1]; pstr->cptr[idx][36] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[37] = dlen_ppsc;
// pstr->buf[38] cspp (from 2)
pstr->bufs[idx][38] = pstr->cptr[idx][37] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[38] = dlen_ppsc;
// pstr->buf[39] ppc (from 3)
pstr->bufs[idx][39] = pstr->bufs[idx][3]; pstr->cptr[idx][38] = pstr->cptr[idx][2];
if (!idx) pstr->datlen[39] = dlen_ppc;
// pstr->buf[40] cspp (from 2)
pstr->bufs[idx][40] = pstr->cptr[idx][39] = pstr->bufs[idx][2];
if (!idx) pstr->datlen[40] = dlen_ppsc;
// pstr->buf[41] pspc (from 1)
pstr->bufs[idx][41] = pstr->bufs[idx][1]; pstr->cptr[idx][40] = pstr->cptr[idx][0];
if (!idx) pstr->datlen[41] = dlen_ppsc;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
int *MixOrder, tot_todo;
#ifdef SIMD_COEF_64
// group based upon size splits.
MixOrder = mem_calloc((count+6*MAX_KEYS_PER_CRYPT), sizeof(int));
{
static const int lens[17][6] = {
{0,24,48,88,89,90}, // 0 byte salt
{0,24,48,88,89,90}, // 1 byte salt
{0,23,24,46,48,87}, // 2 byte salt
{0,23,24,45,48,87}, // 3 byte salt
{0,22,24,44,48,86}, // 4 byte salt
{0,22,24,43,48,86}, // 5 byte salt
{0,21,24,42,48,85}, // 6 byte salt
{0,21,24,41,48,85}, // 7 byte salt
{0,20,24,40,48,84}, // 8 byte salt
{0,20,24,39,48,84}, // 9 byte salt
{0,19,24,38,48,83}, // 10 byte salt
{0,19,24,37,48,83}, // 11 byte salt
{0,18,24,36,48,82}, // 12 byte salt
{0,18,24,35,48,82}, // 13 byte salt
{0,17,24,34,48,81}, // 14 byte salt
{0,17,24,33,48,81}, // 15 byte salt
{0,16,24,32,48,80} };
int j;
tot_todo = 0;
saved_len[count] = 0; // point all 'tail' MMX buffer elements to this location.
for (j = 0; j < 5; ++j) {
for (index = 0; index < count; ++index) {
if (saved_len[index] >= lens[cur_salt->len][j] && saved_len[index] < lens[cur_salt->len][j+1])
MixOrder[tot_todo++] = index;
}
while (tot_todo % MAX_KEYS_PER_CRYPT)
MixOrder[tot_todo++] = count;
}
}
#else
// no need to mix. just run them one after the next, in any order.
MixOrder = mem_calloc(count, sizeof(int));
for (index = 0; index < count; ++index)
MixOrder[index] = index;
tot_todo = count;
#endif
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < tot_todo; index += MAX_KEYS_PER_CRYPT)
{
// portably align temp_result char * pointer machine word size.
union xx {
unsigned char c[BINARY_SIZE];
ARCH_WORD a[BINARY_SIZE/sizeof(ARCH_WORD)];
} u;
unsigned char *temp_result = u.c;
SHA512_CTX ctx;
SHA512_CTX alt_ctx;
size_t cnt;
int idx;
char *cp;
char p_bytes[PLAINTEXT_LENGTH+1];
char s_bytes[PLAINTEXT_LENGTH+1];
char tmp_cls[sizeof(cryptloopstruct)+MEM_ALIGN_SIMD];
cryptloopstruct *crypt_struct;
#ifdef SIMD_COEF_64
char tmp_sse_out[8*MAX_KEYS_PER_CRYPT*8+MEM_ALIGN_SIMD];
ARCH_WORD_64 *sse_out;
sse_out = (ARCH_WORD_64 *)mem_align(tmp_sse_out, MEM_ALIGN_SIMD);
#endif
crypt_struct = (cryptloopstruct *)mem_align(tmp_cls,MEM_ALIGN_SIMD);
for (idx = 0; idx < MAX_KEYS_PER_CRYPT; ++idx)
{
/* Prepare for the real work. */
SHA512_Init(&ctx);
/* Add the key string. */
SHA512_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* The last part is the salt string. This must be at most 16
characters and it ends at the first `$' character (for
compatibility with existing implementations). */
SHA512_Update(&ctx, cur_salt->salt, cur_salt->len);
/* Compute alternate SHA512 sum with input KEY, SALT, and KEY. The
final result will be added to the first context. */
SHA512_Init(&alt_ctx);
/* Add key. */
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Add salt. */
SHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len);
/* Add key again. */
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Now get result of this (64 bytes) and add it to the other
context. */
SHA512_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &alt_ctx);
/* Add for any character in the key one byte of the alternate sum. */
for (cnt = saved_len[MixOrder[index+idx]]; cnt > BINARY_SIZE; cnt -= BINARY_SIZE)
SHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE);
SHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], cnt);
/* Take the binary representation of the length of the key and for every
1 add the alternate sum, for every 0 the key. */
for (cnt = saved_len[MixOrder[index+idx]]; cnt > 0; cnt >>= 1)
if ((cnt & 1) != 0)
SHA512_Update(&ctx, (unsigned char*)crypt_out[MixOrder[index+idx]], BINARY_SIZE);
else
SHA512_Update(&ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Create intermediate result. */
SHA512_Final((unsigned char*)crypt_out[MixOrder[index+idx]], &ctx);
/* Start computation of P byte sequence. */
SHA512_Init(&alt_ctx);
/* For every character in the password add the entire password. */
for (cnt = 0; cnt < saved_len[MixOrder[index+idx]]; ++cnt)
SHA512_Update(&alt_ctx, (unsigned char*)saved_key[MixOrder[index+idx]], saved_len[MixOrder[index+idx]]);
/* Finish the digest. */
SHA512_Final(temp_result, &alt_ctx);
/* Create byte sequence P. */
cp = p_bytes;
for (cnt = saved_len[MixOrder[index+idx]]; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)
cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;
memcpy (cp, temp_result, cnt);
/* Start computation of S byte sequence. */
SHA512_Init(&alt_ctx);
/* repeat the following 16+A[0] times, where A[0] represents the
first byte in digest A interpreted as an 8-bit unsigned value */
for (cnt = 0; cnt < 16 + ((unsigned char*)crypt_out[MixOrder[index+idx]])[0]; ++cnt)
SHA512_Update(&alt_ctx, cur_salt->salt, cur_salt->len);
/* Finish the digest. */
SHA512_Final(temp_result, &alt_ctx);
/* Create byte sequence S. */
cp = s_bytes;
for (cnt = cur_salt->len; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE)
cp = (char *) memcpy (cp, temp_result, BINARY_SIZE) + BINARY_SIZE;
memcpy (cp, temp_result, cnt);
/* Repeatedly run the collected hash value through SHA512 to
burn CPU cycles. */
LoadCryptStruct(crypt_struct, MixOrder[index+idx], idx, p_bytes, s_bytes);
}
idx = 0;
#ifdef SIMD_COEF_64
for (cnt = 1; ; ++cnt) {
if (crypt_struct->datlen[idx]==256) {
unsigned char *cp = crypt_struct->bufs[0][idx];
SIMDSHA512body((__m128i *)cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK);
SIMDSHA512body((__m128i *)&cp[128], sse_out, sse_out, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK|SSEi_RELOAD);
} else {
unsigned char *cp = crypt_struct->bufs[0][idx];
SIMDSHA512body((__m128i *)cp, sse_out, NULL, SSEi_FLAT_IN|SSEi_2BUF_INPUT_FIRST_BLK);
}
if (cnt == cur_salt->rounds)
break;
{
int j, k;
for (k = 0; k < MAX_KEYS_PER_CRYPT; ++k) {
ARCH_WORD_64 *o = (ARCH_WORD_64 *)crypt_struct->cptr[k][idx];
for (j = 0; j < 8; ++j)
*o++ = JOHNSWAP64(sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64]);
}
}
if (++idx == 42)
idx = 0;
}
{
int j, k;
for (k = 0; k < MAX_KEYS_PER_CRYPT; ++k) {
ARCH_WORD_64 *o = (ARCH_WORD_64 *)crypt_out[MixOrder[index+k]];
for (j = 0; j < 8; ++j)
*o++ = JOHNSWAP64(sse_out[j*SIMD_COEF_64+(k&(SIMD_COEF_64-1))+k/SIMD_COEF_64*8*SIMD_COEF_64]);
}
}
#else
SHA512_Init(&ctx);
for (cnt = 1; ; ++cnt) {
// Calling the update with the full 128- or 256-byte buffer forces it to crypt the data correctly.
// NOTE the data is fully formed: it ends in a 0x80, is padded with nulls, AND has the message bit-length appended.
SHA512_Update(&ctx, crypt_struct->bufs[0][idx], crypt_struct->datlen[idx]);
if (cnt == cur_salt->rounds)
break;
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
SHA512_Final(crypt_struct->cptr[0][idx], &ctx);
#else // !defined JTR_INC_COMMON_CRYPTO_SHA2, so it is oSSL, or generic
#if ARCH_LITTLE_ENDIAN
{
int j;
ARCH_WORD_64 *o = (ARCH_WORD_64 *)crypt_struct->cptr[0][idx];
for (j = 0; j < 8; ++j)
*o++ = JOHNSWAP64(ctx.h[j]);
}
#else
memcpy(crypt_struct->cptr[0][idx], ctx.h, BINARY_SIZE);
#endif
#endif
if (++idx == 42)
idx = 0;
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
SHA512_Init(&ctx);
#else
// this memcpy is 'good enough', used instead of SHA512_Init()
memcpy(ctx.h, ctx_init, sizeof(ctx_init));
#endif
}
#ifdef JTR_INC_COMMON_CRYPTO_SHA2
SHA512_Final((unsigned char*)crypt_out[MixOrder[index]], &ctx);
#else
#if ARCH_LITTLE_ENDIAN
{
int j;
ARCH_WORD_64 *o = (ARCH_WORD_64 *)crypt_out[MixOrder[index]];
for (j = 0; j < 8; ++j)
*o++ = JOHNSWAP64(ctx.h[j]);
}
#else
memcpy(crypt_out[MixOrder[index]], ctx.h, BINARY_SIZE);
#endif
#endif
#endif
}
MEM_FREE(MixOrder);
return count;
}
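/*
 * Illustrative sketch only (not called by this format): a stand-alone
 * construction of the sha512crypt "byte sequence P" that crypt_all() above
 * builds inline. SHA512(key repeated len(key) times) is computed once, then
 * tiled out to len(key) bytes. The function name is hypothetical; BINARY_SIZE
 * is the 64-byte SHA-512 digest size used throughout this file.
 */
static void example_make_p_bytes(const char *key, size_t len, char *p_bytes)
{
	SHA512_CTX c;
	unsigned char digest[BINARY_SIZE];
	size_t cnt;
	char *cp = p_bytes;	/* caller provides a buffer of len bytes */

	SHA512_Init(&c);
	for (cnt = 0; cnt < len; ++cnt)	/* add the key once per key byte */
		SHA512_Update(&c, key, len);
	SHA512_Final(digest, &c);
	for (cnt = len; cnt >= BINARY_SIZE; cnt -= BINARY_SIZE) {
		memcpy(cp, digest, BINARY_SIZE);	/* whole 64-byte tiles */
		cp += BINARY_SIZE;
	}
	memcpy(cp, digest, cnt);	/* trailing partial tile */
}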
static void set_salt(void *salt)
{
cur_salt = salt;
}
static void *get_salt(char *ciphertext)
{
static struct saltstruct out;
int len;
memset(&out, 0, sizeof(out));
out.rounds = ROUNDS_DEFAULT;
ciphertext += 3;
if (!strncmp(ciphertext, ROUNDS_PREFIX,
sizeof(ROUNDS_PREFIX) - 1)) {
const char *num = ciphertext + sizeof(ROUNDS_PREFIX) - 1;
char *endp;
unsigned long int srounds = strtoul(num, &endp, 10);
if (*endp == '$')
{
ciphertext = endp + 1;
srounds = srounds < ROUNDS_MIN ?
ROUNDS_MIN : srounds;
out.rounds = srounds > ROUNDS_MAX ?
ROUNDS_MAX : srounds;
}
}
for (len = 0; ciphertext[len] != '$'; len++);
if (len > SALT_LENGTH)
len = SALT_LENGTH;
memcpy(out.salt, ciphertext, len);
out.len = len;
return &out;
}
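/*
 * Worked example (hypothetical hash string): for "$6$rounds=10000$mysalt$...",
 * the leading "$6$" is skipped, rounds parses to 10000 (clamped into
 * [ROUNDS_MIN, ROUNDS_MAX]), and out.salt/out.len become "mysalt"/6.
 * Without a "rounds=" field, out.rounds stays at ROUNDS_DEFAULT (5000,
 * the sha512crypt specification default).
 */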
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static unsigned int sha512crypt_iterations(void *salt)
{
struct saltstruct *sha512crypt_salt;
sha512crypt_salt = salt;
return (unsigned int)sha512crypt_salt->rounds;
}
// Public domain hash function by DJ Bernstein
// We are hashing the entire struct
static int salt_hash(void *salt)
{
unsigned char *s = salt;
unsigned int hash = 5381;
unsigned int i;
for (i = 0; i < SALT_SIZE; i++)
hash = ((hash << 5) + hash) ^ s[i];
return hash & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_cryptsha512 = {
{
FORMAT_LABEL,
FORMAT_NAME,
"SHA512 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
sha512crypt_iterations,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
bml_threshold_ellpack_typed.c | #include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_parallel.h"
#include "../bml_threshold.h"
#include "../bml_types.h"
#include "bml_allocate_ellpack.h"
#include "bml_threshold_ellpack.h"
#include "bml_types_ellpack.h"
#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/** Threshold a matrix.
*
* \ingroup threshold_group
*
* \param A The matrix to be thresholded
* \param threshold Threshold value
 * \return The thresholded matrix (a newly allocated copy; A is unchanged)
*/
bml_matrix_ellpack_t *TYPED_FUNC(
bml_threshold_new_ellpack) (
bml_matrix_ellpack_t * A,
double threshold)
{
int N = A->N;
int M = A->M;
bml_matrix_ellpack_t *B =
TYPED_FUNC(bml_zero_matrix_ellpack) (N, M, A->distribution_mode);
REAL_T *A_value = (REAL_T *) A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
int *A_localRowMin = A->domain->localRowMin;
int *A_localRowMax = A->domain->localRowMax;
REAL_T *B_value = (REAL_T *) B->value;
int *B_index = B->index;
int *B_nnz = B->nnz;
int myRank = bml_getMyRank();
#pragma omp parallel for \
shared(N, M, A_value, A_index, A_nnz) \
shared(A_localRowMin, A_localRowMax, myRank) \
shared(B_value, B_index, B_nnz)
//for (int i = 0; i < N; i++)
for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
{
for (int j = 0; j < A_nnz[i]; j++)
{
if (is_above_threshold(A_value[ROWMAJOR(i, j, N, M)], threshold))
{
B_value[ROWMAJOR(i, B_nnz[i], N, M)] =
A_value[ROWMAJOR(i, j, N, M)];
B_index[ROWMAJOR(i, B_nnz[i], N, M)] =
A_index[ROWMAJOR(i, j, N, M)];
B_nnz[i]++;
}
}
}
return B;
}
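/* Usage sketch (assumes bml's TYPED_FUNC naming convention, e.g. the
 * double-precision real instantiation): make a thresholded copy of A.
 *
 *     bml_matrix_ellpack_t *B =
 *         bml_threshold_new_ellpack_double_real(A, 1.0e-5);
 *
 * Entries failing is_above_threshold() are dropped from the copy; A is
 * unchanged (contrast with bml_threshold_ellpack below, which compacts
 * each row of A in place).
 */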
/** Threshold a matrix in place.
*
* \ingroup threshold_group
*
* \param A The matrix to be thresholded
* \param threshold Threshold value
 * A is thresholded (compacted) in place; nothing is returned.
*/
void TYPED_FUNC(
bml_threshold_ellpack) (
bml_matrix_ellpack_t * A,
double threshold)
{
int N = A->N;
int M = A->M;
REAL_T *A_value = (REAL_T *) A->value;
int *A_index = A->index;
int *A_nnz = A->nnz;
int *A_localRowMin = A->domain->localRowMin;
int *A_localRowMax = A->domain->localRowMax;
int myRank = bml_getMyRank();
int rlen;
#pragma omp parallel for \
private(rlen) \
shared(N,M,A_value,A_index,A_nnz) \
shared(A_localRowMin, A_localRowMax, myRank)
//for (int i = 0; i < N; i++)
for (int i = A_localRowMin[myRank]; i < A_localRowMax[myRank]; i++)
{
rlen = 0;
for (int j = 0; j < A_nnz[i]; j++)
{
if (is_above_threshold(A_value[ROWMAJOR(i, j, N, M)], threshold))
{
if (rlen < j)
{
A_value[ROWMAJOR(i, rlen, N, M)] =
A_value[ROWMAJOR(i, j, N, M)];
A_index[ROWMAJOR(i, rlen, N, M)] =
A_index[ROWMAJOR(i, j, N, M)];
}
rlen++;
}
}
A_nnz[i] = rlen;
}
}
|
prop2DAcoVTIDenQ_DEO2_FDTD.h | #ifndef PROP2DACOVTIDENQ_DEO2_FDTD_H
#define PROP2DACOVTIDENQ_DEO2_FDTD_H
#include <omp.h>
#include <stddef.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <fftw3.h>
#include <complex>
#include "propagatorStaticFunctions.h"
#define MIN(x,y) ((x)<(y)?(x):(y))
class Prop2DAcoVTIDenQ_DEO2_FDTD {
public:
const bool _freeSurface;
const long _nbx, _nbz, _nthread, _nx, _nz, _nsponge;
const float _dx, _dz, _dt;
const float _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz;
const float _fDefault = 0.85f;
float * __restrict__ _v = NULL;
float * __restrict__ _eps = NULL;
float * __restrict__ _eta = NULL;
float * __restrict__ _b = NULL;
float * __restrict__ _f = NULL;
float * __restrict__ _dtOmegaInvQ = NULL;
float * __restrict__ _pSpace = NULL;
float * __restrict__ _mSpace = NULL;
float * __restrict__ _tmpPx1 = NULL;
float * __restrict__ _tmpPz1 = NULL;
float * __restrict__ _tmpMx1 = NULL;
float * __restrict__ _tmpMz1 = NULL;
float * __restrict__ _tmpPx2 = NULL;
float * __restrict__ _tmpPz2 = NULL;
float * __restrict__ _tmpMx2 = NULL;
float * __restrict__ _tmpMz2 = NULL;
float * _pOld = NULL;
float * _pCur = NULL;
float * _mOld = NULL;
float * _mCur = NULL;
Prop2DAcoVTIDenQ_DEO2_FDTD(
bool freeSurface,
long nthread,
long nx,
long nz,
long nsponge,
float dx,
float dz,
float dt,
const long nbx,
const long nbz) :
_freeSurface(freeSurface),
_nbx(nbx),
_nbz(nbz),
_nthread(nthread),
_nx(nx),
_nz(nz),
_nsponge(nsponge),
_dx(dx),
_dz(dz),
_dt(dt),
_c8_1(+1225.0 / 1024.0),
_c8_2(-245.0 / 3072.0),
_c8_3(+49.0 / 5120.0),
_c8_4(-5.0 / 7168.0),
_invDx(1.0 / _dx),
_invDz(1.0 / _dz) {
// Allocate arrays
_v = new float[_nx * _nz];
_eps = new float[_nx * _nz];
_eta = new float[_nx * _nz];
_b = new float[_nx * _nz];
_f = new float[_nx * _nz];
_dtOmegaInvQ = new float[_nx * _nz];
_pSpace = new float[_nx * _nz];
_mSpace = new float[_nx * _nz];
_tmpPx1 = new float[_nx * _nz];
_tmpPz1 = new float[_nx * _nz];
_tmpMx1 = new float[_nx * _nz];
_tmpMz1 = new float[_nx * _nz];
_tmpPx2 = new float[_nx * _nz];
_tmpPz2 = new float[_nx * _nz];
_tmpMx2 = new float[_nx * _nz];
_tmpMz2 = new float[_nx * _nz];
_pOld = new float[_nx * _nz];
_pCur = new float[_nx * _nz];
_mOld = new float[_nx * _nz];
_mCur = new float[_nx * _nz];
numaFirstTouch(_nx, _nz, _nthread, _v, _eps, _eta, _b,
_f, _dtOmegaInvQ, _pSpace, _mSpace,
_tmpPx1, _tmpPz1, _tmpMx1, _tmpMz1,
_tmpPx2, _tmpPz2, _tmpMx2, _tmpMz2,
_pOld, _pCur, _mOld, _mCur, _nbx, _nbz);
}
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void numaFirstTouch(
const long nx,
const long nz,
const long nthread,
float * __restrict__ v,
float * __restrict__ eps,
float * __restrict__ eta,
float * __restrict__ b,
float * __restrict__ f,
float * __restrict__ dtOmegaInvQ,
float * __restrict__ pSpace,
float * __restrict__ mSpace,
float * __restrict__ tmpPx1,
float * __restrict__ tmpPz1,
float * __restrict__ tmpMx1,
float * __restrict__ tmpMz1,
float * __restrict__ tmpPx2,
float * __restrict__ tmpPz2,
float * __restrict__ tmpMx2,
float * __restrict__ tmpMz2,
float * __restrict__ pOld,
float * __restrict__ pCur,
float * __restrict__ mOld,
float * __restrict__ mCur,
const long BX_2D,
const long BZ_2D) {
const long nx4 = nx - 4;
const long nz4 = nz - 4;
#pragma omp parallel for collapse(2) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_2D) {
for (long bz = 4; bz < nz4; bz += BZ_2D) {
const long kxmax = MIN(bx + BX_2D, nx4);
const long kzmax = MIN(bz + BZ_2D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * nz + kz;
v[k] = 0;
eps[k] = 0;
eta[k] = 0;
b[k] = 0;
f[k] = 0;
dtOmegaInvQ[k] = 0;
pSpace[k] = 0;
mSpace[k] = 0;
tmpPx1[k] = 0;
tmpPz1[k] = 0;
tmpMx1[k] = 0;
tmpMz1[k] = 0;
tmpPx2[k] = 0;
tmpPz2[k] = 0;
tmpMx2[k] = 0;
tmpMz2[k] = 0;
pOld[k] = 0;
pCur[k] = 0;
mOld[k] = 0;
mCur[k] = 0;
}
}
}
}
// zero annulus
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kz = 0; kz < 4; kz++) {
#pragma omp simd
for (long kx = 0; kx < nx; kx++) {
const long k = kx * nz + kz;
v[k] = eps[k] = eta[k] = b[k] = f[k] = dtOmegaInvQ[k] = pSpace[k] =
mSpace[k] = tmpPx1[k] = tmpPz1[k] = tmpMx1[k] = tmpMz1[k] = tmpPx2[k] = tmpPz2[k] =
tmpMx2[k] = tmpMz2[k] = pOld[k] = pCur[k] = mOld[k] = mCur[k] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kz = nz4; kz < nz; kz++) {
#pragma omp simd
for (long kx = 0; kx < nx; kx++) {
const long k = kx * nz + kz;
v[k] = eps[k] = eta[k] = b[k] = f[k] = dtOmegaInvQ[k] = pSpace[k] =
mSpace[k] = tmpPx1[k] = tmpPz1[k] = tmpMx1[k] = tmpMz1[k] = tmpPx2[k] = tmpPz2[k] =
tmpMx2[k] = tmpMz2[k] = pOld[k] = pCur[k] = mOld[k] = mCur[k] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 0; kx < 4; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long k = kx * nz + kz;
v[k] = eps[k] = eta[k] = b[k] = f[k] = dtOmegaInvQ[k] = pSpace[k] =
mSpace[k] = tmpPx1[k] = tmpPz1[k] = tmpMx1[k] = tmpMz1[k] = tmpPx2[k] = tmpPz2[k] =
tmpMx2[k] = tmpMz2[k] = pOld[k] = pCur[k] = mOld[k] = mCur[k] = 0;
}
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = nx4; kx < nx; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long k = kx * nz + kz;
v[k] = eps[k] = eta[k] = b[k] = f[k] = dtOmegaInvQ[k] = pSpace[k] =
mSpace[k] = tmpPx1[k] = tmpPz1[k] = tmpMx1[k] = tmpMz1[k] = tmpPx2[k] = tmpPz2[k] =
tmpMx2[k] = tmpMz2[k] = pOld[k] = pCur[k] = mOld[k] = mCur[k] = 0;
}
}
}
~Prop2DAcoVTIDenQ_DEO2_FDTD() {
if (_v != NULL) delete [] _v;
if (_eps != NULL) delete [] _eps;
if (_eta != NULL) delete [] _eta;
if (_b != NULL) delete [] _b;
if (_f != NULL) delete [] _f;
if (_dtOmegaInvQ != NULL) delete [] _dtOmegaInvQ;
if (_pSpace != NULL) delete [] _pSpace;
if (_mSpace != NULL) delete [] _mSpace;
if (_tmpPx1 != NULL) delete [] _tmpPx1;
if (_tmpPz1 != NULL) delete [] _tmpPz1;
if (_tmpMx1 != NULL) delete [] _tmpMx1;
if (_tmpMz1 != NULL) delete [] _tmpMz1;
if (_tmpPx2 != NULL) delete [] _tmpPx2;
if (_tmpPz2 != NULL) delete [] _tmpPz2;
if (_tmpMx2 != NULL) delete [] _tmpMx2;
if (_tmpMz2 != NULL) delete [] _tmpMz2;
if (_pOld != NULL) delete [] _pOld;
if (_pCur != NULL) delete [] _pCur;
if (_mOld != NULL) delete [] _mOld;
if (_mCur != NULL) delete [] _mCur;
}
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
void info() {
printf("\n");
printf("Prop2DAcoVTIDenQ_DEO2_FDTD\n");
printf(" nx,nz; %5ld %5ld\n", _nx, _nz);
printf(" nthread,nsponge,fs; %5ld %5ld %5d\n", _nthread, _nsponge, _freeSurface);
printf(" X min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dx * (_nx - 1), _dx);
printf(" Z min,max,inc; %+16.8f %+16.8f %+16.8f\n", 0.0, _dz * (_nz - 1), _dz);
}
/**
* Notes
* - User must have called setupDtOmegaInvQ_2D to initialize the array _dtOmegaInvQ
* - wavefield arrays are switched in this call
* pCur -> pOld
* pOld -> pCur
* mCur -> mOld
* mOld -> mCur
*/
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void timeStep() {
applyFirstDerivatives2D_PlusHalf_Sandwich(
_freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz,
_pCur, _pCur, _mCur, _mCur, _eps, _eta, _f, _b,
_tmpPx1, _tmpPz1, _tmpMx1, _tmpMz1, _nbx, _nbz);
applyFirstDerivatives2D_MinusHalf_TimeUpdate_Nonlinear(
_freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, _dt,
_tmpPx1, _tmpPz1, _tmpMx1, _tmpMz1, _v, _b, _dtOmegaInvQ, _pCur, _mCur,
_pSpace, _mSpace, _pOld, _mOld, _nbx, _nbz);
// swap pointers
float *pswap = _pOld;
_pOld = _pCur;
_pCur = pswap;
float *mswap = _mOld;
_mOld = _mCur;
_mCur = mswap;
}
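// Usage sketch (driver loop and source injection are hypothetical; the
// earth-model arrays _v, _eps, _eta, _b, _f and _dtOmegaInvQ must be
// populated first, e.g. via setupDtOmegaInvQ_2D as noted above):
//
//     Prop2DAcoVTIDenQ_DEO2_FDTD prop(false, 8, nx, nz, 60, 10.0f, 10.0f, 0.001f, 32, 32);
//     for (long it = 0; it < nt; it++) {
//         prop._pCur[ksrc] += wavelet[it];  // inject one source sample
//         prop.timeStep();                  // advance P,M by dt; swaps cur/old internally
//     }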
/**
* Same as above, but does not collect the spatial derivatives
* Note this is only used in the PSD operators, where the first (transient) time steps do
* not need to save the P'' term
*/
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void timeStepLinear() {
applyFirstDerivatives2D_PlusHalf_Sandwich(
_freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz,
_pCur, _pCur, _mCur, _mCur, _eps, _eta, _f, _b,
_tmpPx1, _tmpPz1, _tmpMx1, _tmpMz1, _nbx, _nbz);
applyFirstDerivatives2D_MinusHalf_TimeUpdate_Linear(
_freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz, _dt,
_tmpPx1, _tmpPz1, _tmpMx1, _tmpMz1, _v, _b, _dtOmegaInvQ, _pCur, _mCur, _pOld, _mOld, _nbx, _nbz);
// swap pointers
float *pswap = _pOld;
_pOld = _pCur;
_pCur = pswap;
float *mswap = _mOld;
_mOld = _mCur;
_mCur = mswap;
}
/**
* Scale spatial derivatives by v^2/b to make them temporal derivs
*/
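// Rationale: the time update integrates (b/v^2) d^2P/dt^2 = L_spatial(P),
// where L_spatial is the sandwiched divergence accumulated in _pSpace and
// _mSpace, so multiplying the stored spatial terms by v^2/b recovers the
// second time derivatives needed by the Born operators below.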
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void scaleSpatialDerivatives() {
#pragma omp parallel for collapse(2) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _nz + kz;
const float v2OverB = _v[k] * _v[k] / _b[k];
_pSpace[k] *= v2OverB;
_mSpace[k] *= v2OverB;
}
}
}
}
}
/**
* Add the Born source at the current time
*
* User must have:
* - called the nonlinear forward
* - saved the 2nd time derivatives of the wavefields at the corresponding time index (passed in as wavefieldDP / wavefieldDM)
* - Born source term will be injected into the _pCur and _mCur arrays
*/
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void forwardBornInjection_V(float *dVel, float *wavefieldDP, float *wavefieldDM) {
#pragma omp parallel for collapse(2) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _nz + kz;
const float V = _v[k];
const float B = _b[k];
const float dV = dVel[k];
// The injected term is (V^2/B) * _dt^2 * (2*B*dV/V^3):
//   V^2/B "clears" the B/V^2 factor on L_tP and L_tM,
//   _dt^2 comes from the finite-difference time update,
//   2*B*dV/V^3 comes from linearizing in velocity.
// The product simplifies to 2 * _dt^2 * dV / V (B cancels).
const float factor = 2 * _dt * _dt * dV / V;
_pCur[k] += factor * wavefieldDP[k];
_mCur[k] += factor * wavefieldDM[k];
}
}
}
}
}
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void forwardBornInjection_VEA(float *dVel, float *dEps, float *dEta,
float *wavefieldP, float *wavefieldM, float *wavefieldDP, float *wavefieldDM) {
// Right side spatial derivatives for the Born source
applyFirstDerivatives2D_PlusHalf(
_freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz,
wavefieldP, wavefieldP, _tmpPx1, _tmpPz1, _nbx, _nbz);
applyFirstDerivatives2D_PlusHalf(
_freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz,
wavefieldM, wavefieldM, _tmpMx1, _tmpMz1, _nbx, _nbz);
// Sandwich terms for the Born source
// note flipped sign for Z derivative term between P and M
#pragma omp parallel for collapse(2) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _nz + kz;
const float V = _v[k];
const float E = _eps[k];
const float A = _eta[k];
const float B = _b[k];
const float F = _f[k];
const float dE = dEps[k];
const float dA = dEta[k];
_tmpPx2[k] = (+2 * B * dE) *_tmpPx1[k];
_tmpPz2[k] = (-2 * B * F * A * dA) *_tmpPz1[k] +
(dA * B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpMz1[k];
_tmpMx2[k] = 0;
_tmpMz2[k] = (+2 * B * F * A * dA) *_tmpMz1[k] +
(dA * B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpPz1[k];
}
}
}
}
// Left side spatial derivatives for the Born source
applyFirstDerivatives2D_MinusHalf(
_freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz,
_tmpPx2, _tmpPz2, _tmpPx1, _tmpPz1, _nbx, _nbz);
applyFirstDerivatives2D_MinusHalf(
_freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz,
_tmpMx2, _tmpMz2, _tmpMx1, _tmpMz1, _nbx, _nbz);
// add the born source at the current time
#pragma omp parallel for collapse(2) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _nz + kz;
const float V = _v[k];
const float B = _b[k];
const float dV = dVel[k];
const float dt2v2OverB = _dt * _dt * V * V / B;
const float factor = 2 * B * dV / (V * V * V);
_pCur[k] += dt2v2OverB * (factor * wavefieldDP[k] + _tmpPx1[k] + _tmpPz1[k]);
_mCur[k] += dt2v2OverB * (factor * wavefieldDM[k] + _tmpMx1[k] + _tmpMz1[k]);
}
}
}
}
}
/**
* Accumulate the Born image term at the current time
*
* User must have:
* - called the nonlinear forward
* - saved the 2nd time derivatives of the wavefields at the corresponding time index (passed in as wavefieldDP / wavefieldDM)
* - Born image term will be accumulated in the dVel array
*/
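// Imaging condition applied below, per cell and per time step:
//     dVel[k] += (2*B/V^3) * (wavefieldDP[k]*_pOld[k] + wavefieldDM[k]*_mOld[k])
// i.e. a zero-lag cross-correlation of the saved source-side second time
// derivatives with the adjoint wavefields, weighted by the velocity
// linearization factor 2*B/V^3.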
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void adjointBornAccumulation_V(float *dVel, float *wavefieldDP, float *wavefieldDM) {
#pragma omp parallel for collapse(2) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _nz + kz;
const float V = _v[k];
const float B = _b[k];
const float factor = 2 * B / (V * V * V);
dVel[k] += factor * (wavefieldDP[k] * _pOld[k] + wavefieldDM[k] * _mOld[k]);
}
}
}
}
}
/**
* Apply Kz wavenumber filter for up/down wavefield separation
* Faqi, 2011, Geophysics https://library.seg.org/doi/full/10.1190/1.3533914
*
* We handle the FWI and RTM imaging conditions with a condition inside the OMP loop
*
* Example Kz filtering with 8 samples
* frequency | +0 | +1 | +2 | +3 | N | -3 | -2 | -1 |
* original | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
* upgoing | 0 | X | X | X | 4 | 5 | 6 | 7 |
* dngoing | 0 | 1 | 2 | 3 | 4 | X | X | X |
*/
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void adjointBornAccumulation_wavefieldsep_V(float *dVel,
float *wavefieldDP, float *wavefieldDM, const long isFWI) {
const long nfft = 2 * _nz;
const float scale = 1.0f / (float)(nfft);
// FWI: adj wavefield is dngoing
// RTM: adj wavefield is upgoing
const long kfft_adj = (isFWI) ? 0 : nfft / 2;
std::complex<float> * __restrict__ tmp = new std::complex<float>[nfft];
fftwf_plan planForward = fftwf_plan_dft_1d(nfft,
reinterpret_cast<fftwf_complex*>(tmp),
reinterpret_cast<fftwf_complex*>(tmp), +1, FFTW_ESTIMATE);
fftwf_plan planInverse = fftwf_plan_dft_1d(nfft,
reinterpret_cast<fftwf_complex*>(tmp),
reinterpret_cast<fftwf_complex*>(tmp), -1, FFTW_ESTIMATE);
delete [] tmp;
#pragma omp parallel num_threads(_nthread)
{
std::complex<float> * __restrict__ tmp_nlf_p = new std::complex<float>[nfft];
std::complex<float> * __restrict__ tmp_adj_p = new std::complex<float>[nfft];
std::complex<float> * __restrict__ tmp_nlf_m = new std::complex<float>[nfft];
std::complex<float> * __restrict__ tmp_adj_m = new std::complex<float>[nfft];
#pragma omp for schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
const long kxmax = MIN(bx + _nbx, _nx);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kfft = 0; kfft < nfft; kfft++) {
tmp_nlf_p[kfft] = 0;
tmp_adj_p[kfft] = 0;
tmp_nlf_m[kfft] = 0;
tmp_adj_m[kfft] = 0;
}
#pragma omp simd
for (long kz = 0; kz < _nz; kz++) {
const long k = kx * _nz + kz;
tmp_nlf_p[kz] = scale * wavefieldDP[k];
tmp_adj_p[kz] = scale * _pOld[k];
tmp_nlf_m[kz] = scale * wavefieldDM[k];
tmp_adj_m[kz] = scale * _mOld[k];
}
fftwf_execute_dft(planForward,
reinterpret_cast<fftwf_complex*>(tmp_nlf_p),
reinterpret_cast<fftwf_complex*>(tmp_nlf_p));
fftwf_execute_dft(planForward,
reinterpret_cast<fftwf_complex*>(tmp_adj_p),
reinterpret_cast<fftwf_complex*>(tmp_adj_p));
fftwf_execute_dft(planForward,
reinterpret_cast<fftwf_complex*>(tmp_nlf_m),
reinterpret_cast<fftwf_complex*>(tmp_nlf_m));
fftwf_execute_dft(planForward,
reinterpret_cast<fftwf_complex*>(tmp_adj_m),
reinterpret_cast<fftwf_complex*>(tmp_adj_m));
// upgoing: zero the positive frequencies, excluding Nyquist
// dngoing: zero the negative frequencies, excluding Nyquist
#pragma omp simd
for (long k = 1; k < nfft / 2; k++) {
tmp_nlf_p[nfft / 2 + k] = 0;
tmp_adj_p[kfft_adj + k] = 0;
tmp_nlf_m[nfft / 2 + k] = 0;
tmp_adj_m[kfft_adj + k] = 0;
}
fftwf_execute_dft(planInverse,
reinterpret_cast<fftwf_complex*>(tmp_nlf_p),
reinterpret_cast<fftwf_complex*>(tmp_nlf_p));
fftwf_execute_dft(planInverse,
reinterpret_cast<fftwf_complex*>(tmp_adj_p),
reinterpret_cast<fftwf_complex*>(tmp_adj_p));
fftwf_execute_dft(planInverse,
reinterpret_cast<fftwf_complex*>(tmp_nlf_m),
reinterpret_cast<fftwf_complex*>(tmp_nlf_m));
fftwf_execute_dft(planInverse,
reinterpret_cast<fftwf_complex*>(tmp_adj_m),
reinterpret_cast<fftwf_complex*>(tmp_adj_m));
// Faqi eq 10
// Applied to FWI: [Sup * Rdn]
// Applied to RTM: [Sup * Rup]
#pragma omp simd
for (long kz = 0; kz < _nz; kz++) {
const long k = kx * _nz + kz;
const float V = _v[k];
const float B = _b[k];
const float factor = 2 * B / (V * V * V);
dVel[k] += factor * (real(tmp_nlf_p[kz] * tmp_adj_p[kz]) + real(tmp_nlf_m[kz] * tmp_adj_m[kz]));
}
} // end loop over kx
} // end loop over bx
delete [] tmp_nlf_p;
delete [] tmp_adj_p;
delete [] tmp_nlf_m;
delete [] tmp_adj_m;
} // end parallel region
fftwf_destroy_plan(planForward);
fftwf_destroy_plan(planInverse);
}
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline void adjointBornAccumulation_VEA(float *dVel, float *dEps, float *dEta,
float *wavefieldP, float *wavefieldM, float *wavefieldDP, float *wavefieldDM) {
// Right side spatial derivatives for the adjoint accumulation
applyFirstDerivatives2D_PlusHalf(
_freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz,
wavefieldP, wavefieldP, _tmpPx1, _tmpPz1, _nbx, _nbz);
applyFirstDerivatives2D_PlusHalf(
_freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz,
wavefieldM, wavefieldM, _tmpMx1, _tmpMz1, _nbx, _nbz);
applyFirstDerivatives2D_PlusHalf(
_freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz,
_pOld, _pOld, _tmpPx2, _tmpPz2, _nbx, _nbz);
applyFirstDerivatives2D_PlusHalf(
_freeSurface, _nx, _nz, _nthread, _c8_1, _c8_2, _c8_3, _c8_4, _invDx, _invDz,
_mOld, _mOld, _tmpMx2, _tmpMz2, _nbx, _nbz);
// Sandwich terms for the adjoint accumulation
#pragma omp parallel for collapse(2) num_threads(_nthread) schedule(static)
for (long bx = 0; bx < _nx; bx += _nbx) {
for (long bz = 0; bz < _nz; bz += _nbz) {
const long kxmax = MIN(bx + _nbx, _nx);
const long kzmax = MIN(bz + _nbz, _nz);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long k = kx * _nz + kz;
const float V = _v[k];
const float E = _eps[k];
const float A = _eta[k];
const float B = _b[k];
const float F = _f[k];
const float factor = 2 * B / (V * V * V);
dVel[k] +=
factor * wavefieldDP[k] * _pOld[k] +
factor * wavefieldDM[k] * _mOld[k];
dEps[k] += -2 * B * _tmpPx1[k] * _tmpPx2[k];
const float partP =
2 * B * F * A * _tmpPz1[k] - (B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpMz1[k];
const float partM =
2 * B * F * A * _tmpMz1[k] + (B * F * (1 - 2 * A * A) / sqrt(1 - A * A)) * _tmpPz1[k];
dEta[k] += partP * _tmpPz2[k] - partM * _tmpMz2[k];
}
}
}
}
}
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline static void applyFirstDerivatives2D_PlusHalf_Sandwich(
const long freeSurface,
const long nx,
const long nz,
const long nthread,
const Type c8_1,
const Type c8_2,
const Type c8_3,
const Type c8_4,
const Type invDx,
const Type invDz,
const Type * __restrict__ const inPX,
const Type * __restrict__ const inPZ,
const Type * __restrict__ const inMX,
const Type * __restrict__ const inMZ,
const Type * __restrict__ const fieldEps,
const Type * __restrict__ const fieldEta,
const Type * __restrict__ const fieldVsVp,
const Type * __restrict__ const fieldBuoy,
Type * __restrict__ tmpPX,
Type * __restrict__ tmpPZ,
Type * __restrict__ tmpMX,
Type * __restrict__ tmpMZ,
const long BX_2D,
const long BZ_2D) {
const long nx4 = nx - 4;
const long nz4 = nz - 4;
// zero output arrays
#pragma omp parallel for collapse(2) num_threads(nthread) schedule(static)
for (long bx = 0; bx < nx; bx += BX_2D) {
for (long bz = 0; bz < nz; bz += BZ_2D) {
const long kxmax = MIN(bx + BX_2D, nx);
const long kzmax = MIN(bz + BZ_2D, nz);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
long k = kx * nz + kz;
tmpPX[k] = 0;
tmpPZ[k] = 0;
tmpMX[k] = 0;
tmpMZ[k] = 0;
}
}
}
}
// interior
#pragma omp parallel for collapse(2) num_threads(nthread) schedule(guided)
for (long bx = 4; bx < nx4; bx += BX_2D) {
for (long bz = 4; bz < nz4; bz += BZ_2D) { /* cache blocking */
const long kxmax = MIN(bx + BX_2D, nx4);
const long kzmax = MIN(bz + BZ_2D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long kxnz = kx * nz;
const long k = kxnz + kz;
const Type stencilDPx =
c8_1 * (- inPX[(kx+0) * nz + kz] + inPX[(kx+1) * nz + kz]) +
c8_2 * (- inPX[(kx-1) * nz + kz] + inPX[(kx+2) * nz + kz]) +
c8_3 * (- inPX[(kx-2) * nz + kz] + inPX[(kx+3) * nz + kz]) +
c8_4 * (- inPX[(kx-3) * nz + kz] + inPX[(kx+4) * nz + kz]);
const Type stencilDPz =
c8_1 * (- inPZ[kxnz + (kz+0)] + inPZ[kxnz + (kz+1)]) +
c8_2 * (- inPZ[kxnz + (kz-1)] + inPZ[kxnz + (kz+2)]) +
c8_3 * (- inPZ[kxnz + (kz-2)] + inPZ[kxnz + (kz+3)]) +
c8_4 * (- inPZ[kxnz + (kz-3)] + inPZ[kxnz + (kz+4)]);
const Type stencilDMx =
c8_1 * (- inMX[(kx+0) * nz + kz] + inMX[(kx+1) * nz + kz]) +
c8_2 * (- inMX[(kx-1) * nz + kz] + inMX[(kx+2) * nz + kz]) +
c8_3 * (- inMX[(kx-2) * nz + kz] + inMX[(kx+3) * nz + kz]) +
c8_4 * (- inMX[(kx-3) * nz + kz] + inMX[(kx+4) * nz + kz]);
const Type stencilDMz =
c8_1 * (- inMZ[kxnz + (kz+0)] + inMZ[kxnz + (kz+1)]) +
c8_2 * (- inMZ[kxnz + (kz-1)] + inMZ[kxnz + (kz+2)]) +
c8_3 * (- inMZ[kxnz + (kz-2)] + inMZ[kxnz + (kz+3)]) +
c8_4 * (- inMZ[kxnz + (kz-3)] + inMZ[kxnz + (kz+4)]);
const Type dPx = invDx * stencilDPx;
const Type dPz = invDz * stencilDPz;
const Type dMx = invDx * stencilDMx;
const Type dMz = invDz * stencilDMz;
const Type E = 1 + 2 * fieldEps[k];
const Type A = fieldEta[k];
const Type F = fieldVsVp[k];
const Type B = fieldBuoy[k];
tmpPX[k] = B * E * dPx;
tmpPZ[k] = B * (1 - F * A * A) * dPz + B * F * A * sqrt(1 - A * A) * dMz;
tmpMX[k] = B * (1 - F) * dMx;
tmpMZ[k] = B * F * A * sqrt(1 - A * A) * dPz + B * (1 - F + F * A * A) * dMz;
}
}
}
}
// roll on free surface
if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(guided)
for (long kx = 4; kx < nx4; kx++) {
// kz = 0 -- 1/2 cells below free surface for Z derivative, at free surface for X/Y derivative
// X and Y derivatives are identically zero
// [kx * nz + 0]
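// The flipped '+' signs in the kz = 0..3 Z stencils below implement an
// antisymmetric image condition about the free surface (in[-m] = -in[m]):
// taps that would reach above the surface fold back with reversed sign.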
{
const Type stencilDPz0 =
c8_1 * (- inPZ[kx * nz + 0] + inPZ[kx * nz + 1]) +
c8_2 * (+ inPZ[kx * nz + 1] + inPZ[kx * nz + 2]) +
c8_3 * (+ inPZ[kx * nz + 2] + inPZ[kx * nz + 3]) +
c8_4 * (+ inPZ[kx * nz + 3] + inPZ[kx * nz + 4]);
const Type stencilDMz0 =
c8_1 * (- inMZ[kx * nz + 0] + inMZ[kx * nz + 1]) +
c8_2 * (+ inMZ[kx * nz + 1] + inMZ[kx * nz + 2]) +
c8_3 * (+ inMZ[kx * nz + 2] + inMZ[kx * nz + 3]) +
c8_4 * (+ inMZ[kx * nz + 3] + inMZ[kx * nz + 4]);
const Type dPx = 0;
const Type dPz = invDz * stencilDPz0;
const Type dMx = 0;
const Type dMz = invDz * stencilDMz0;
const long k = kx * nz + 0;
const Type E = 1 + 2 * fieldEps[k];
const Type A = fieldEta[k];
const Type F = fieldVsVp[k];
const Type B = fieldBuoy[k];
tmpPX[k] = B * E * dPx;
tmpPZ[k] = B * (1 - F * A * A) * dPz + B * F * A * sqrt(1 - A * A) * dMz;
tmpMX[k] = B * (1 - F) * dMx;
tmpMZ[k] = B * F * A * sqrt(1 - A * A) * dPz + B * (1 - F + F * A * A) * dMz;
}
// kz = 1 -- 1 1/2 cells below free surface for Z derivative, 1 cell below for X/Y derivative
// [kx * nz + 1]
{
const Type stencilDPx1 =
c8_1 * (- inPX[(kx+0) * nz + 1] + inPX[(kx+1) * nz + 1]) +
c8_2 * (- inPX[(kx-1) * nz + 1] + inPX[(kx+2) * nz + 1]) +
c8_3 * (- inPX[(kx-2) * nz + 1] + inPX[(kx+3) * nz + 1]) +
c8_4 * (- inPX[(kx-3) * nz + 1] + inPX[(kx+4) * nz + 1]);
const Type stencilDPz1 =
c8_1 * (- inPZ[kx * nz + 1] + inPZ[kx * nz + 2]) +
c8_2 * (- inPZ[kx * nz + 0] + inPZ[kx * nz + 3]) +
c8_3 * (+ inPZ[kx * nz + 1] + inPZ[kx * nz + 4]) +
c8_4 * (+ inPZ[kx * nz + 2] + inPZ[kx * nz + 5]);
const Type stencilDMx1 =
c8_1 * (- inMX[(kx+0) * nz + 1] + inMX[(kx+1) * nz + 1]) +
c8_2 * (- inMX[(kx-1) * nz + 1] + inMX[(kx+2) * nz + 1]) +
c8_3 * (- inMX[(kx-2) * nz + 1] + inMX[(kx+3) * nz + 1]) +
c8_4 * (- inMX[(kx-3) * nz + 1] + inMX[(kx+4) * nz + 1]);
const Type stencilDMz1 =
c8_1 * (- inMZ[kx * nz + 1] + inMZ[kx * nz + 2]) +
c8_2 * (- inMZ[kx * nz + 0] + inMZ[kx * nz + 3]) +
c8_3 * (+ inMZ[kx * nz + 1] + inMZ[kx * nz + 4]) +
c8_4 * (+ inMZ[kx * nz + 2] + inMZ[kx * nz + 5]);
const Type dPx = invDx * stencilDPx1;
const Type dPz = invDz * stencilDPz1;
const Type dMx = invDx * stencilDMx1;
const Type dMz = invDz * stencilDMz1;
const long k = kx * nz + 1;
const Type E = 1 + 2 * fieldEps[k];
const Type A = fieldEta[k];
const Type F = fieldVsVp[k];
const Type B = fieldBuoy[k];
tmpPX[k] = B * E * dPx;
tmpPZ[k] = B * (1 - F * A * A) * dPz + B * F * A * sqrt(1 - A * A) * dMz;
tmpMX[k] = B * (1 - F) * dMx;
tmpMZ[k] = B * F * A * sqrt(1 - A * A) * dPz + B * (1 - F + F * A * A) * dMz;
}
// kz = 2 -- 2 1/2 cells below free surface for Z derivative, 2 cells below for X/Y derivative
// [kx * nz + 2]
{
const Type stencilDPx2 =
c8_1 * (- inPX[(kx+0) * nz + 2] + inPX[(kx+1) * nz + 2]) +
c8_2 * (- inPX[(kx-1) * nz + 2] + inPX[(kx+2) * nz + 2]) +
c8_3 * (- inPX[(kx-2) * nz + 2] + inPX[(kx+3) * nz + 2]) +
c8_4 * (- inPX[(kx-3) * nz + 2] + inPX[(kx+4) * nz + 2]);
const Type stencilDPz2 =
c8_1 * (- inPZ[kx * nz + 2] + inPZ[kx * nz + 3]) +
c8_2 * (- inPZ[kx * nz + 1] + inPZ[kx * nz + 4]) +
c8_3 * (- inPZ[kx * nz + 0] + inPZ[kx * nz + 5]) +
c8_4 * (+ inPZ[kx * nz + 1] + inPZ[kx * nz + 6]);
const Type stencilDMx2 =
c8_1 * (- inMX[(kx+0) * nz + 2] + inMX[(kx+1) * nz + 2]) +
c8_2 * (- inMX[(kx-1) * nz + 2] + inMX[(kx+2) * nz + 2]) +
c8_3 * (- inMX[(kx-2) * nz + 2] + inMX[(kx+3) * nz + 2]) +
c8_4 * (- inMX[(kx-3) * nz + 2] + inMX[(kx+4) * nz + 2]);
const Type stencilDMz2 =
c8_1 * (- inMZ[kx * nz + 2] + inMZ[kx * nz + 3]) +
c8_2 * (- inMZ[kx * nz + 1] + inMZ[kx * nz + 4]) +
c8_3 * (- inMZ[kx * nz + 0] + inMZ[kx * nz + 5]) +
c8_4 * (+ inMZ[kx * nz + 1] + inMZ[kx * nz + 6]);
const Type dPx = invDx * stencilDPx2;
const Type dPz = invDz * stencilDPz2;
const Type dMx = invDx * stencilDMx2;
const Type dMz = invDz * stencilDMz2;
const long k = kx * nz + 2;
const Type E = 1 + 2 * fieldEps[k];
const Type A = fieldEta[k];
const Type F = fieldVsVp[k];
const Type B = fieldBuoy[k];
tmpPX[k] = B * E * dPx;
tmpPZ[k] = B * (1 - F * A * A) * dPz + B * F * A * sqrt(1 - A * A) * dMz;
tmpMX[k] = B * (1 - F) * dMx;
tmpMZ[k] = B * F * A * sqrt(1 - A * A) * dPz + B * (1 - F + F * A * A) * dMz;
}
// kz = 3 -- 3 1/2 cells below free surface for Z derivative, 3 cells below for X/Y derivative
// [kx * nz + 3]
{
const Type stencilDPx3 =
c8_1 * (- inPX[(kx+0) * nz + 3] + inPX[(kx+1) * nz + 3]) +
c8_2 * (- inPX[(kx-1) * nz + 3] + inPX[(kx+2) * nz + 3]) +
c8_3 * (- inPX[(kx-2) * nz + 3] + inPX[(kx+3) * nz + 3]) +
c8_4 * (- inPX[(kx-3) * nz + 3] + inPX[(kx+4) * nz + 3]);
const Type stencilDPz3 =
c8_1 * (- inPZ[kx * nz + 3] + inPZ[kx * nz + 4]) +
c8_2 * (- inPZ[kx * nz + 2] + inPZ[kx * nz + 5]) +
c8_3 * (- inPZ[kx * nz + 1] + inPZ[kx * nz + 6]) +
c8_4 * (- inPZ[kx * nz + 0] + inPZ[kx * nz + 7]);
const Type stencilDMx3 =
c8_1 * (- inMX[(kx+0) * nz + 3] + inMX[(kx+1) * nz + 3]) +
c8_2 * (- inMX[(kx-1) * nz + 3] + inMX[(kx+2) * nz + 3]) +
c8_3 * (- inMX[(kx-2) * nz + 3] + inMX[(kx+3) * nz + 3]) +
c8_4 * (- inMX[(kx-3) * nz + 3] + inMX[(kx+4) * nz + 3]);
const Type stencilDMz3 =
c8_1 * (- inMZ[kx * nz + 3] + inMZ[kx * nz + 4]) +
c8_2 * (- inMZ[kx * nz + 2] + inMZ[kx * nz + 5]) +
c8_3 * (- inMZ[kx * nz + 1] + inMZ[kx * nz + 6]) +
c8_4 * (- inMZ[kx * nz + 0] + inMZ[kx * nz + 7]);
const Type dPx = invDx * stencilDPx3;
const Type dPz = invDz * stencilDPz3;
const Type dMx = invDx * stencilDMx3;
const Type dMz = invDz * stencilDMz3;
const long k = kx * nz + 3;
const Type E = 1 + 2 * fieldEps[k];
const Type A = fieldEta[k];
const Type F = fieldVsVp[k];
const Type B = fieldBuoy[k];
tmpPX[k] = B * E * dPx;
tmpPZ[k] = B * (1 - F * A * A) * dPz + B * F * A * sqrt(1 - A * A) * dMz;
tmpMX[k] = B * (1 - F) * dMx;
tmpMZ[k] = B * F * A * sqrt(1 - A * A) * dPz + B * (1 - F + F * A * A) * dMz;
}
}
}
}
/**
* Combines
* applyFirstDerivatives_MinusHalf(P)
* secondOrderTimeUpdate_BubeConservation(P)
* applyFirstDerivatives_MinusHalf(M)
* secondOrderTimeUpdate_BubeConservation(M)
*
* Updates pOld and mOld with second order time update
*
* Nonlinear method: outputs the spatial derivatives for source wavefield serialization
* Linear method: does not output the spatial derivatives
*/
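// Discrete update applied in both variants (pOld is overwritten with the
// t+dt wavefield; the caller swaps it with pCur afterwards):
//     p(t+dt) = 2*p(t) - p(t-dt) + dt^2 * (v^2/b) * L_spatial(p)
//               - dtOmegaInvQ * (p(t) - p(t-dt))
// where the last term supplies the visco-acoustic (1/Q) damping.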
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline static void applyFirstDerivatives2D_MinusHalf_TimeUpdate_Nonlinear(
const long freeSurface,
const long nx,
const long nz,
const long nthread,
const Type c8_1,
const Type c8_2,
const Type c8_3,
const Type c8_4,
const Type invDx,
const Type invDz,
const Type dtMod,
const Type * __restrict__ const tmpPX,
const Type * __restrict__ const tmpPZ,
const Type * __restrict__ const tmpMX,
const Type * __restrict__ const tmpMZ,
const Type * __restrict__ const fieldVel,
const Type * __restrict__ const fieldBuoy,
const Type * __restrict__ const dtOmegaInvQ,
Type * __restrict__ pCur,
Type * __restrict__ mCur,
Type * __restrict__ pSpace,
Type * __restrict__ mSpace,
Type * __restrict__ pOld,
Type * __restrict__ mOld,
const long BX_2D,
const long BZ_2D) {
const long nx4 = nx - 4;
const long nz4 = nz - 4;
const Type dt2 = dtMod * dtMod;
// zero output arrays
#pragma omp parallel for collapse(2) num_threads(nthread) schedule(static)
for (long bx = 0; bx < nx; bx += BX_2D) {
for (long bz = 0; bz < nz; bz += BZ_2D) {
const long kxmax = MIN(bx + BX_2D, nx);
const long kzmax = MIN(bz + BZ_2D, nz);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
long k = kx * nz + kz;
pSpace[k] = 0;
mSpace[k] = 0;
}
}
}
}
// interior
#pragma omp parallel for collapse(2) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_2D) {
for (long bz = 4; bz < nz4; bz += BZ_2D) { /* cache blocking */
const long kxmax = MIN(bx + BX_2D, nx4);
const long kzmax = MIN(bz + BZ_2D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const Type stencilDPx =
c8_1 * (- tmpPX[(kx-1) * nz + kz] + tmpPX[(kx+0) * nz + kz]) +
c8_2 * (- tmpPX[(kx-2) * nz + kz] + tmpPX[(kx+1) * nz + kz]) +
c8_3 * (- tmpPX[(kx-3) * nz + kz] + tmpPX[(kx+2) * nz + kz]) +
c8_4 * (- tmpPX[(kx-4) * nz + kz] + tmpPX[(kx+3) * nz + kz]);
const Type stencilDPz =
c8_1 * (- tmpPZ[kx * nz + (kz-1)] + tmpPZ[kx * nz + (kz+0)]) +
c8_2 * (- tmpPZ[kx * nz + (kz-2)] + tmpPZ[kx * nz + (kz+1)]) +
c8_3 * (- tmpPZ[kx * nz + (kz-3)] + tmpPZ[kx * nz + (kz+2)]) +
c8_4 * (- tmpPZ[kx * nz + (kz-4)] + tmpPZ[kx * nz + (kz+3)]);
const Type stencilDMx =
c8_1 * (- tmpMX[(kx-1) * nz + kz] + tmpMX[(kx+0) * nz + kz]) +
c8_2 * (- tmpMX[(kx-2) * nz + kz] + tmpMX[(kx+1) * nz + kz]) +
c8_3 * (- tmpMX[(kx-3) * nz + kz] + tmpMX[(kx+2) * nz + kz]) +
c8_4 * (- tmpMX[(kx-4) * nz + kz] + tmpMX[(kx+3) * nz + kz]);
const Type stencilDMz =
c8_1 * (- tmpMZ[kx * nz + (kz-1)] + tmpMZ[kx * nz + (kz+0)]) +
c8_2 * (- tmpMZ[kx * nz + (kz-2)] + tmpMZ[kx * nz + (kz+1)]) +
c8_3 * (- tmpMZ[kx * nz + (kz-3)] + tmpMZ[kx * nz + (kz+2)]) +
c8_4 * (- tmpMZ[kx * nz + (kz-4)] + tmpMZ[kx * nz + (kz+3)]);
const Type dPX = invDx * stencilDPx;
const Type dPZ = invDz * stencilDPz;
const Type dMX = invDx * stencilDMx;
const Type dMZ = invDz * stencilDMz;
const long k = kx * nz + kz;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pSpace[k] = dPX + dPZ;
mSpace[k] = dMX + dMZ;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
}
}
}
// roll on free surface
if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 4; kx < nx4; kx++) {
// kz = 0 -- at the free surface -- p = 0
// [kx * nz + 0]
{
const Type dPX = 0;
const Type dPZ = 0;
const Type dMX = 0;
const Type dMZ = 0;
const long k = kx * nz + 0;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pSpace[k] = dPX + dPZ;
mSpace[k] = dMX + dMZ;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
// kz = 1 -- one cell below the free surface
// [kx * nz + 1]
{
const Type stencilDPx1 =
c8_1 * (- tmpPX[(kx-1) * nz + 1] + tmpPX[(kx+0) * nz + 1]) +
c8_2 * (- tmpPX[(kx-2) * nz + 1] + tmpPX[(kx+1) * nz + 1]) +
c8_3 * (- tmpPX[(kx-3) * nz + 1] + tmpPX[(kx+2) * nz + 1]) +
c8_4 * (- tmpPX[(kx-4) * nz + 1] + tmpPX[(kx+3) * nz + 1]);
const Type stencilDPz1 =
c8_1 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 1]) +
c8_2 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 2]) +
c8_3 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 3]) +
c8_4 * (- tmpPZ[kx * nz + 2] + tmpPZ[kx * nz + 4]);
const Type stencilDMx1 =
c8_1 * (- tmpMX[(kx-1) * nz + 1] + tmpMX[(kx+0) * nz + 1]) +
c8_2 * (- tmpMX[(kx-2) * nz + 1] + tmpMX[(kx+1) * nz + 1]) +
c8_3 * (- tmpMX[(kx-3) * nz + 1] + tmpMX[(kx+2) * nz + 1]) +
c8_4 * (- tmpMX[(kx-4) * nz + 1] + tmpMX[(kx+3) * nz + 1]);
const Type stencilDMz1 =
c8_1 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 1]) +
c8_2 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 2]) +
c8_3 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 3]) +
c8_4 * (- tmpMZ[kx * nz + 2] + tmpMZ[kx * nz + 4]);
const Type dPx = invDx * stencilDPx1;
const Type dPz = invDz * stencilDPz1;
const Type dMx = invDx * stencilDMx1;
const Type dMz = invDz * stencilDMz1;
const long k = kx * nz + 1;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pSpace[k] = dPx + dPz;
mSpace[k] = dMx + dMz;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
// kz = 2 -- two cells below the free surface
// [kx * nz + 2]
{
const Type stencilDPx2 =
c8_1 * (- tmpPX[(kx-1) * nz + 2] + tmpPX[(kx+0) * nz + 2]) +
c8_2 * (- tmpPX[(kx-2) * nz + 2] + tmpPX[(kx+1) * nz + 2]) +
c8_3 * (- tmpPX[(kx-3) * nz + 2] + tmpPX[(kx+2) * nz + 2]) +
c8_4 * (- tmpPX[(kx-4) * nz + 2] + tmpPX[(kx+3) * nz + 2]);
const Type stencilDPz2 =
c8_1 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 2]) +
c8_2 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 3]) +
c8_3 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 4]) +
c8_4 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 5]);
const Type stencilDMx2 =
c8_1 * (- tmpMX[(kx-1) * nz + 2] + tmpMX[(kx+0) * nz + 2]) +
c8_2 * (- tmpMX[(kx-2) * nz + 2] + tmpMX[(kx+1) * nz + 2]) +
c8_3 * (- tmpMX[(kx-3) * nz + 2] + tmpMX[(kx+2) * nz + 2]) +
c8_4 * (- tmpMX[(kx-4) * nz + 2] + tmpMX[(kx+3) * nz + 2]);
const Type stencilDMz2 =
c8_1 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 2]) +
c8_2 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 3]) +
c8_3 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 4]) +
c8_4 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 5]);
const Type dPx = invDx * stencilDPx2;
const Type dPz = invDz * stencilDPz2;
const Type dMx = invDx * stencilDMx2;
const Type dMz = invDz * stencilDMz2;
const long k = kx * nz + 2;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pSpace[k] = dPx + dPz;
mSpace[k] = dMx + dMz;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
// kz = 3 -- three cells below the free surface
// [kx * nz + 3]
{
const Type stencilDPx3 =
c8_1 * (- tmpPX[(kx-1) * nz + 3] + tmpPX[(kx+0) * nz + 3]) +
c8_2 * (- tmpPX[(kx-2) * nz + 3] + tmpPX[(kx+1) * nz + 3]) +
c8_3 * (- tmpPX[(kx-3) * nz + 3] + tmpPX[(kx+2) * nz + 3]) +
c8_4 * (- tmpPX[(kx-4) * nz + 3] + tmpPX[(kx+3) * nz + 3]);
const Type stencilDPz3 =
c8_1 * (- tmpPZ[kx * nz + 2] + tmpPZ[kx * nz + 3]) +
c8_2 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 4]) +
c8_3 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 5]) +
c8_4 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 6]);
const Type stencilDMx3 =
c8_1 * (- tmpMX[(kx-1) * nz + 3] + tmpMX[(kx+0) * nz + 3]) +
c8_2 * (- tmpMX[(kx-2) * nz + 3] + tmpMX[(kx+1) * nz + 3]) +
c8_3 * (- tmpMX[(kx-3) * nz + 3] + tmpMX[(kx+2) * nz + 3]) +
c8_4 * (- tmpMX[(kx-4) * nz + 3] + tmpMX[(kx+3) * nz + 3]);
const Type stencilDMz3 =
c8_1 * (- tmpMZ[kx * nz + 2] + tmpMZ[kx * nz + 3]) +
c8_2 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 4]) +
c8_3 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 5]) +
c8_4 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 6]);
const Type dPx = invDx * stencilDPx3;
const Type dPz = invDz * stencilDPz3;
const Type dMx = invDx * stencilDMx3;
const Type dMz = invDz * stencilDMz3;
const long k = kx * nz + 3;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pSpace[k] = dPx + dPz;
mSpace[k] = dMx + dMz;
pOld[k] = dt2V2_B * pSpace[k] - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * mSpace[k] - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
}
}
}
template<class Type>
#if defined(__FUNCTION_CLONES__)
__attribute__((target_clones("avx","avx2","avx512f","default")))
#endif
inline static void applyFirstDerivatives2D_MinusHalf_TimeUpdate_Linear(
const long freeSurface,
const long nx,
const long nz,
const long nthread,
const Type c8_1,
const Type c8_2,
const Type c8_3,
const Type c8_4,
const Type invDx,
const Type invDz,
const Type dtMod,
const Type * __restrict__ const tmpPX,
const Type * __restrict__ const tmpPZ,
const Type * __restrict__ const tmpMX,
const Type * __restrict__ const tmpMZ,
const Type * __restrict__ const fieldVel,
const Type * __restrict__ const fieldBuoy,
const Type * __restrict__ const dtOmegaInvQ,
Type * __restrict__ pCur,
Type * __restrict__ mCur,
Type * __restrict__ pOld,
Type * __restrict__ mOld,
const long BX_2D,
const long BZ_2D) {
const long nx4 = nx - 4;
const long nz4 = nz - 4;
const Type dt2 = dtMod * dtMod;
// interior
#pragma omp parallel for collapse(2) num_threads(nthread) schedule(static)
for (long bx = 4; bx < nx4; bx += BX_2D) {
for (long bz = 4; bz < nz4; bz += BZ_2D) { /* cache blocking */
const long kxmax = MIN(bx + BX_2D, nx4);
const long kzmax = MIN(bz + BZ_2D, nz4);
for (long kx = bx; kx < kxmax; kx++) {
#pragma omp simd
for (long kz = bz; kz < kzmax; kz++) {
const long kxnz = kx * nz;
const long k = kxnz + kz;
const Type stencilDPx =
c8_1 * (- tmpPX[(kx-1) * nz + kz] + tmpPX[(kx+0) * nz + kz]) +
c8_2 * (- tmpPX[(kx-2) * nz + kz] + tmpPX[(kx+1) * nz + kz]) +
c8_3 * (- tmpPX[(kx-3) * nz + kz] + tmpPX[(kx+2) * nz + kz]) +
c8_4 * (- tmpPX[(kx-4) * nz + kz] + tmpPX[(kx+3) * nz + kz]);
const Type stencilDPz =
c8_1 * (- tmpPZ[kxnz + (kz-1)] + tmpPZ[kxnz + (kz+0)]) +
c8_2 * (- tmpPZ[kxnz + (kz-2)] + tmpPZ[kxnz + (kz+1)]) +
c8_3 * (- tmpPZ[kxnz + (kz-3)] + tmpPZ[kxnz + (kz+2)]) +
c8_4 * (- tmpPZ[kxnz + (kz-4)] + tmpPZ[kxnz + (kz+3)]);
const Type stencilDMx =
c8_1 * (- tmpMX[(kx-1) * nz + kz] + tmpMX[(kx+0) * nz + kz]) +
c8_2 * (- tmpMX[(kx-2) * nz + kz] + tmpMX[(kx+1) * nz + kz]) +
c8_3 * (- tmpMX[(kx-3) * nz + kz] + tmpMX[(kx+2) * nz + kz]) +
c8_4 * (- tmpMX[(kx-4) * nz + kz] + tmpMX[(kx+3) * nz + kz]);
const Type stencilDMz =
c8_1 * (- tmpMZ[kxnz + (kz-1)] + tmpMZ[kxnz + (kz+0)]) +
c8_2 * (- tmpMZ[kxnz + (kz-2)] + tmpMZ[kxnz + (kz+1)]) +
c8_3 * (- tmpMZ[kxnz + (kz-3)] + tmpMZ[kxnz + (kz+2)]) +
c8_4 * (- tmpMZ[kxnz + (kz-4)] + tmpMZ[kxnz + (kz+3)]);
const Type dPx = invDx * stencilDPx;
const Type dPz = invDz * stencilDPz;
const Type dMx = invDx * stencilDMx;
const Type dMz = invDz * stencilDMz;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pOld[k] = dt2V2_B * (dPx + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * (dMx + dMz) - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
}
}
}
// roll on free surface
if (freeSurface) {
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kx = 4; kx < nx4; kx++) {
// kz = 0 -- at the free surface -- p = 0
// [kx * nz + 0]
{
const Type dPX = 0;
const Type dPZ = 0;
const Type dMX = 0;
const Type dMZ = 0;
const long k = kx * nz + 0;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pOld[k] = dt2V2_B * (dPX + dPZ) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * (dMX + dMZ) - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
// kz = 1 -- one cell below the free surface
// [kx * nz + 1]
{
const Type stencilDPx1 =
c8_1 * (- tmpPX[(kx-1) * nz + 1] + tmpPX[(kx+0) * nz + 1]) +
c8_2 * (- tmpPX[(kx-2) * nz + 1] + tmpPX[(kx+1) * nz + 1]) +
c8_3 * (- tmpPX[(kx-3) * nz + 1] + tmpPX[(kx+2) * nz + 1]) +
c8_4 * (- tmpPX[(kx-4) * nz + 1] + tmpPX[(kx+3) * nz + 1]);
const Type stencilDPz1 =
c8_1 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 1]) +
c8_2 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 2]) +
c8_3 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 3]) +
c8_4 * (- tmpPZ[kx * nz + 2] + tmpPZ[kx * nz + 4]);
const Type stencilDMx1 =
c8_1 * (- tmpMX[(kx-1) * nz + 1] + tmpMX[(kx+0) * nz + 1]) +
c8_2 * (- tmpMX[(kx-2) * nz + 1] + tmpMX[(kx+1) * nz + 1]) +
c8_3 * (- tmpMX[(kx-3) * nz + 1] + tmpMX[(kx+2) * nz + 1]) +
c8_4 * (- tmpMX[(kx-4) * nz + 1] + tmpMX[(kx+3) * nz + 1]);
const Type stencilDMz1 =
c8_1 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 1]) +
c8_2 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 2]) +
c8_3 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 3]) +
c8_4 * (- tmpMZ[kx * nz + 2] + tmpMZ[kx * nz + 4]);
const Type dPx = invDx * stencilDPx1;
const Type dPz = invDz * stencilDPz1;
const Type dMx = invDx * stencilDMx1;
const Type dMz = invDz * stencilDMz1;
const long k = kx * nz + 1;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pOld[k] = dt2V2_B * (dPx + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * (dMx + dMz) - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
// kz = 2 -- two cells below the free surface
// [kx * nz + 2]
{
const Type stencilDPx2 =
c8_1 * (- tmpPX[(kx-1) * nz + 2] + tmpPX[(kx+0) * nz + 2]) +
c8_2 * (- tmpPX[(kx-2) * nz + 2] + tmpPX[(kx+1) * nz + 2]) +
c8_3 * (- tmpPX[(kx-3) * nz + 2] + tmpPX[(kx+2) * nz + 2]) +
c8_4 * (- tmpPX[(kx-4) * nz + 2] + tmpPX[(kx+3) * nz + 2]);
const Type stencilDPz2 =
c8_1 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 2]) +
c8_2 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 3]) +
c8_3 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 4]) +
c8_4 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 5]);
const Type stencilDMx2 =
c8_1 * (- tmpMX[(kx-1) * nz + 2] + tmpMX[(kx+0) * nz + 2]) +
c8_2 * (- tmpMX[(kx-2) * nz + 2] + tmpMX[(kx+1) * nz + 2]) +
c8_3 * (- tmpMX[(kx-3) * nz + 2] + tmpMX[(kx+2) * nz + 2]) +
c8_4 * (- tmpMX[(kx-4) * nz + 2] + tmpMX[(kx+3) * nz + 2]);
const Type stencilDMz2 =
c8_1 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 2]) +
c8_2 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 3]) +
c8_3 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 4]) +
c8_4 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 5]);
const Type dPx = invDx * stencilDPx2;
const Type dPz = invDz * stencilDPz2;
const Type dMx = invDx * stencilDMx2;
const Type dMz = invDz * stencilDMz2;
const long k = kx * nz + 2;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pOld[k] = dt2V2_B * (dPx + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * (dMx + dMz) - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
// kz = 3 -- three cells below the free surface
// [kx * nz + 3]
{
const Type stencilDPx3 =
c8_1 * (- tmpPX[(kx-1) * nz + 3] + tmpPX[(kx+0) * nz + 3]) +
c8_2 * (- tmpPX[(kx-2) * nz + 3] + tmpPX[(kx+1) * nz + 3]) +
c8_3 * (- tmpPX[(kx-3) * nz + 3] + tmpPX[(kx+2) * nz + 3]) +
c8_4 * (- tmpPX[(kx-4) * nz + 3] + tmpPX[(kx+3) * nz + 3]);
const Type stencilDPz3 =
c8_1 * (- tmpPZ[kx * nz + 2] + tmpPZ[kx * nz + 3]) +
c8_2 * (- tmpPZ[kx * nz + 1] + tmpPZ[kx * nz + 4]) +
c8_3 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 5]) +
c8_4 * (- tmpPZ[kx * nz + 0] + tmpPZ[kx * nz + 6]);
const Type stencilDMx3 =
c8_1 * (- tmpMX[(kx-1) * nz + 3] + tmpMX[(kx+0) * nz + 3]) +
c8_2 * (- tmpMX[(kx-2) * nz + 3] + tmpMX[(kx+1) * nz + 3]) +
c8_3 * (- tmpMX[(kx-3) * nz + 3] + tmpMX[(kx+2) * nz + 3]) +
c8_4 * (- tmpMX[(kx-4) * nz + 3] + tmpMX[(kx+3) * nz + 3]);
const Type stencilDMz3 =
c8_1 * (- tmpMZ[kx * nz + 2] + tmpMZ[kx * nz + 3]) +
c8_2 * (- tmpMZ[kx * nz + 1] + tmpMZ[kx * nz + 4]) +
c8_3 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 5]) +
c8_4 * (- tmpMZ[kx * nz + 0] + tmpMZ[kx * nz + 6]);
const Type dPx = invDx * stencilDPx3;
const Type dPz = invDz * stencilDPz3;
const Type dMx = invDx * stencilDMx3;
const Type dMz = invDz * stencilDMz3;
const long k = kx * nz + 3;
const Type dt2V2_B = dt2 * fieldVel[k] * fieldVel[k] / fieldBuoy[k];
pOld[k] = dt2V2_B * (dPx + dPz) - dtOmegaInvQ[k] * (pCur[k] - pOld[k]) - pOld[k] + 2 * pCur[k];
mOld[k] = dt2V2_B * (dMx + dMz) - dtOmegaInvQ[k] * (mCur[k] - mOld[k]) - mOld[k] + 2 * mCur[k];
}
}
}
}
};
#endif
|
tree-vect-loop.c | /* Loop Vectorization
Copyright (C) 2003-2018 Free Software Foundation, Inc.
Contributed by Dorit Naishlos <dorit@il.ibm.com> and
Ira Rosen <irar@il.ibm.com>
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "tree-ssa-loop-ivopts.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-ssa-loop.h"
#include "cfgloop.h"
#include "params.h"
#include "tree-scalar-evolution.h"
#include "tree-vectorizer.h"
#include "gimple-fold.h"
#include "cgraph.h"
#include "tree-cfg.h"
#include "tree-if-conv.h"
#include "internal-fn.h"
#include "tree-vector-builder.h"
#include "vec-perm-indices.h"
#include "tree-eh.h"
/* Loop Vectorization Pass.
This pass tries to vectorize loops.
For example, the vectorizer transforms the following simple loop:
short a[N]; short b[N]; short c[N]; int i;
for (i=0; i<N; i++){
a[i] = b[i] + c[i];
}
as if it were manually vectorized by rewriting the source code into:
typedef int __attribute__((mode(V8HI))) v8hi;
short a[N]; short b[N]; short c[N]; int i;
v8hi *pa = (v8hi*)a, *pb = (v8hi*)b, *pc = (v8hi*)c;
v8hi va, vb, vc;
for (i=0; i<N/8; i++){
vb = pb[i];
vc = pc[i];
va = vb + vc;
pa[i] = va;
}
The main entry to this pass is vectorize_loops(), in which
the vectorizer applies a set of analyses on a given set of loops,
followed by the actual vectorization transformation for the loops that
had successfully passed the analysis phase.
Throughout this pass we make a distinction between two types of
data: scalars (which are represented by SSA_NAMES), and memory references
("data-refs"). These two types of data require different handling both
during analysis and transformation. The types of data-refs that the
vectorizer currently supports are ARRAY_REFS whose base is an array DECL
(not a pointer), and INDIRECT_REFS through pointers; both array and pointer
accesses are required to have a simple (consecutive) access pattern.
Analysis phase:
===============
The driver for the analysis phase is vect_analyze_loop().
It applies a set of analyses, some of which rely on the scalar evolution
analyzer (scev) developed by Sebastian Pop.
During the analysis phase the vectorizer records some information
per stmt in a "stmt_vec_info" struct which is attached to each stmt in the
loop, as well as general information about the loop as a whole, which is
recorded in a "loop_vec_info" struct attached to each loop.
Transformation phase:
=====================
The loop transformation phase scans all the stmts in the loop, and
creates a vector stmt (or a sequence of stmts) for each scalar stmt S in
the loop that needs to be vectorized. It inserts the vector code sequence
just before the scalar stmt S, and records a pointer to the vector code
in STMT_VINFO_VEC_STMT (stmt_info) (stmt_info is the stmt_vec_info struct
attached to S). This pointer will be used for the vectorization of following
stmts which use the def of stmt S. Stmt S is removed if it writes to memory;
otherwise, we rely on dead code elimination for removing it.
For example, say stmt S1 was vectorized into stmt VS1:
VS1: vb = px[i];
S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
S2: a = b;
To vectorize stmt S2, the vectorizer first finds the stmt that defines
the operand 'b' (S1), and gets the relevant vector def 'vb' from the
vector stmt VS1 pointed to by STMT_VINFO_VEC_STMT (stmt_info (S1)). The
resulting sequence would be:
VS1: vb = px[i];
S1: b = x[i]; STMT_VINFO_VEC_STMT (stmt_info (S1)) = VS1
VS2: va = vb;
S2: a = b; STMT_VINFO_VEC_STMT (stmt_info (S2)) = VS2
Operands that are not SSA_NAMEs are data-refs that appear in
load/store operations (like 'x[i]' in S1), and are handled differently.
Target modeling:
=================
Currently the only target-specific information that is used is the
size of the vector (in bytes) - "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD".
Targets that can support different sizes of vectors will, for now, need
to specify one value for "TARGET_VECTORIZE_UNITS_PER_SIMD_WORD". More
flexibility will be added in the future.
Since we only vectorize operations whose vector form can be
expressed using existing tree codes, to verify that an operation is
supported, the vectorizer checks the relevant optab at the relevant
machine_mode (e.g., optab_handler (add_optab, V8HImode)). If
the value found is CODE_FOR_nothing, then there's no target support, and
we can't vectorize the stmt.
For additional information on this project see:
http://gcc.gnu.org/projects/tree-ssa/vectorization.html
*/
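/* Illustration (not part of the pass; V8HImode is the example mode used
   above): the target-support test described above reduces to an optab
   query of the form

     if (optab_handler (add_optab, V8HImode) == CODE_FOR_nothing)
       ... not vectorizable: the target has no V8HI add pattern ...

   so every vectorized operation needs a matching named pattern at the
   chosen machine_mode.  */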
static void vect_estimate_min_profitable_iters (loop_vec_info, int *, int *);
/* Function vect_determine_vectorization_factor
Determine the vectorization factor (VF). VF is the number of data elements
that are operated upon in parallel in a single iteration of the vectorized
loop. For example, when vectorizing a loop that operates on 4-byte elements,
on a target with a vector size (VS) of 16 bytes, the VF is set to 4, since 4
elements can fit in a single vector register.
We currently support vectorization of loops in which all types operated upon
are of the same size. Therefore this function currently sets VF according to
the size of the types operated upon, and fails if there are multiple sizes
in the loop.
VF is also the factor by which the loop iterations are strip-mined, e.g.:
original loop:
for (i=0; i<N; i++){
a[i] = b[i] + c[i];
}
vectorized loop:
for (i=0; i<N; i+=VF){
a[i:VF] = b[i:VF] + c[i:VF];
}
*/
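/* Illustration (not from the original source; numbers assumed): with
   VS = 16 bytes and 4-byte ints, VF = 16 / 4 = 4, and the strip-mined
   loop above is accompanied by a scalar epilogue for the remainder:

     for (i = 0; i < (N / 4) * 4; i += 4)
       a[i:4] = b[i:4] + c[i:4];
     for (; i < N; i++)
       a[i] = b[i] + c[i];
*/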
static bool
vect_determine_vectorization_factor (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
unsigned nbbs = loop->num_nodes;
poly_uint64 vectorization_factor = 1;
tree scalar_type = NULL_TREE;
gphi *phi;
tree vectype;
stmt_vec_info stmt_info;
unsigned i;
HOST_WIDE_INT dummy;
gimple *stmt, *pattern_stmt = NULL;
gimple_seq pattern_def_seq = NULL;
gimple_stmt_iterator pattern_def_si = gsi_none ();
bool analyze_pattern_stmt = false;
bool bool_result;
auto_vec<stmt_vec_info> mask_producers;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_determine_vectorization_factor ===\n");
for (i = 0; i < nbbs; i++)
{
basic_block bb = bbs[i];
for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
gsi_next (&si))
{
phi = si.phi ();
stmt_info = vinfo_for_stmt (phi);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "==> examining phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
}
gcc_assert (stmt_info);
if (STMT_VINFO_RELEVANT_P (stmt_info)
|| STMT_VINFO_LIVE_P (stmt_info))
{
gcc_assert (!STMT_VINFO_VECTYPE (stmt_info));
scalar_type = TREE_TYPE (PHI_RESULT (phi));
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
dump_printf (MSG_NOTE, "\n");
}
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported "
"data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
STMT_VINFO_VECTYPE (stmt_info) = vectype;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
dump_printf (MSG_NOTE, "\n");
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
dump_printf (MSG_NOTE, "\n");
}
vect_update_max_nunits (&vectorization_factor, vectype);
}
}
for (gimple_stmt_iterator si = gsi_start_bb (bb);
!gsi_end_p (si) || analyze_pattern_stmt;)
{
tree vf_vectype;
if (analyze_pattern_stmt)
stmt = pattern_stmt;
else
stmt = gsi_stmt (si);
stmt_info = vinfo_for_stmt (stmt);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
}
gcc_assert (stmt_info);
/* Skip stmts which do not need to be vectorized. */
if ((!STMT_VINFO_RELEVANT_P (stmt_info)
&& !STMT_VINFO_LIVE_P (stmt_info))
|| gimple_clobber_p (stmt))
{
if (STMT_VINFO_IN_PATTERN_P (stmt_info)
&& (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
&& (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
|| STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
{
stmt = pattern_stmt;
stmt_info = vinfo_for_stmt (pattern_stmt);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
}
}
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "skip.\n");
gsi_next (&si);
continue;
}
}
else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
&& (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
&& (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
|| STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
analyze_pattern_stmt = true;
/* If a pattern statement has def stmts, analyze them too. */
if (is_pattern_stmt_p (stmt_info))
{
if (pattern_def_seq == NULL)
{
pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
pattern_def_si = gsi_start (pattern_def_seq);
}
else if (!gsi_end_p (pattern_def_si))
gsi_next (&pattern_def_si);
if (pattern_def_seq != NULL)
{
gimple *pattern_def_stmt = NULL;
stmt_vec_info pattern_def_stmt_info = NULL;
while (!gsi_end_p (pattern_def_si))
{
pattern_def_stmt = gsi_stmt (pattern_def_si);
pattern_def_stmt_info
= vinfo_for_stmt (pattern_def_stmt);
if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
|| STMT_VINFO_LIVE_P (pattern_def_stmt_info))
break;
gsi_next (&pattern_def_si);
}
if (!gsi_end_p (pattern_def_si))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> examining pattern def stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
pattern_def_stmt, 0);
}
stmt = pattern_def_stmt;
stmt_info = pattern_def_stmt_info;
}
else
{
pattern_def_si = gsi_none ();
analyze_pattern_stmt = false;
}
}
else
analyze_pattern_stmt = false;
}
if (gimple_get_lhs (stmt) == NULL_TREE
/* MASK_STORE has no lhs, but is ok. */
&& (!is_gimple_call (stmt)
|| !gimple_call_internal_p (stmt)
|| gimple_call_internal_fn (stmt) != IFN_MASK_STORE))
{
if (is_gimple_call (stmt))
{
/* Ignore calls with no lhs. These must be calls to
#pragma omp simd functions, and the vectorization factor
they really need can't be determined until
vectorizable_simd_clone_call. */
if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
{
pattern_def_seq = NULL;
gsi_next (&si);
}
continue;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: irregular stmt.");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
0);
}
return false;
}
if (VECTOR_MODE_P (TYPE_MODE (gimple_expr_type (stmt))))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vector stmt in loop:");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt, 0);
}
return false;
}
bool_result = false;
if (STMT_VINFO_VECTYPE (stmt_info))
{
/* The only case when a vectype had already been set is for stmts
that contain a dataref, or for "pattern-stmts" (stmts
generated by the vectorizer to represent/replace a certain
idiom). */
gcc_assert (STMT_VINFO_DATA_REF (stmt_info)
|| is_pattern_stmt_p (stmt_info)
|| !gsi_end_p (pattern_def_si));
vectype = STMT_VINFO_VECTYPE (stmt_info);
}
else
{
gcc_assert (!STMT_VINFO_DATA_REF (stmt_info));
if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
scalar_type = TREE_TYPE (gimple_call_arg (stmt, 3));
else
scalar_type = TREE_TYPE (gimple_get_lhs (stmt));
/* Bool ops don't participate in the vectorization factor
computation. For comparisons, use the compared types to
compute a factor. */
if (VECT_SCALAR_BOOLEAN_TYPE_P (scalar_type)
&& is_gimple_assign (stmt)
&& gimple_assign_rhs_code (stmt) != COND_EXPR)
{
if (STMT_VINFO_RELEVANT_P (stmt_info)
|| STMT_VINFO_LIVE_P (stmt_info))
mask_producers.safe_push (stmt_info);
bool_result = true;
if (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt))
== tcc_comparison
&& !VECT_SCALAR_BOOLEAN_TYPE_P
(TREE_TYPE (gimple_assign_rhs1 (stmt))))
scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
else
{
if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
{
pattern_def_seq = NULL;
gsi_next (&si);
}
continue;
}
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
dump_printf (MSG_NOTE, "\n");
}
vectype = get_vectype_for_scalar_type (scalar_type);
if (!vectype)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported "
"data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
if (!bool_result)
STMT_VINFO_VECTYPE (stmt_info) = vectype;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vectype);
dump_printf (MSG_NOTE, "\n");
}
}
/* Don't try to compute the VF from scalar types if the stmt
produces a boolean vector. Use the result vectype instead. */
if (VECTOR_BOOLEAN_TYPE_P (vectype))
vf_vectype = vectype;
else
{
/* The vectorization factor is according to the smallest
scalar type (or the largest vector size, but we only
support one vector size per loop). */
if (!bool_result)
scalar_type = vect_get_smallest_scalar_type (stmt, &dummy,
&dummy);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"get vectype for scalar type: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, scalar_type);
dump_printf (MSG_NOTE, "\n");
}
vf_vectype = get_vectype_for_scalar_type (scalar_type);
}
if (!vf_vectype)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported data-type ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
scalar_type);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
if (maybe_ne (GET_MODE_SIZE (TYPE_MODE (vectype)),
GET_MODE_SIZE (TYPE_MODE (vf_vectype))))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: different sized vector "
"types in statement, ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
vectype);
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
vf_vectype);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectype: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, vf_vectype);
dump_printf (MSG_NOTE, "\n");
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vf_vectype));
dump_printf (MSG_NOTE, "\n");
}
vect_update_max_nunits (&vectorization_factor, vf_vectype);
if (!analyze_pattern_stmt && gsi_end_p (pattern_def_si))
{
pattern_def_seq = NULL;
gsi_next (&si);
}
}
}
/* TODO: Analyze cost. Decide if it is worthwhile to vectorize. */
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "vectorization factor = ");
dump_dec (MSG_NOTE, vectorization_factor);
dump_printf (MSG_NOTE, "\n");
}
if (known_le (vectorization_factor, 1U))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported data-type\n");
return false;
}
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
for (i = 0; i < mask_producers.length (); i++)
{
tree mask_type = NULL;
stmt = STMT_VINFO_STMT (mask_producers[i]);
if (is_gimple_assign (stmt)
&& TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison
&& !VECT_SCALAR_BOOLEAN_TYPE_P
(TREE_TYPE (gimple_assign_rhs1 (stmt))))
{
scalar_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
mask_type = get_mask_type_for_scalar_type (scalar_type);
if (!mask_type)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported mask\n");
return false;
}
}
else
{
tree rhs;
ssa_op_iter iter;
gimple *def_stmt;
enum vect_def_type dt;
FOR_EACH_SSA_TREE_OPERAND (rhs, stmt, iter, SSA_OP_USE)
{
if (!vect_is_simple_use (rhs, mask_producers[i]->vinfo,
&def_stmt, &dt, &vectype))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't compute mask type "
"for statement, ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
0);
}
return false;
}
/* No vectype probably means external definition.
Allow it in case there is another operand which
allows us to determine the mask type. */
if (!vectype)
continue;
if (!mask_type)
mask_type = vectype;
else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
TYPE_VECTOR_SUBPARTS (vectype)))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: different sized masks "
"types in statement, ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
mask_type);
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
vectype);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
else if (VECTOR_BOOLEAN_TYPE_P (mask_type)
!= VECTOR_BOOLEAN_TYPE_P (vectype))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: mixed mask and "
"nonmask vector types in statement, ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
mask_type);
dump_printf (MSG_MISSED_OPTIMIZATION, " and ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM,
vectype);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return false;
}
}
/* We may compare a boolean value loaded as a vector of integers.
Fix mask_type in such a case. */
if (mask_type
&& !VECTOR_BOOLEAN_TYPE_P (mask_type)
&& gimple_code (stmt) == GIMPLE_ASSIGN
&& TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison)
mask_type = build_same_sized_truth_vector_type (mask_type);
}
/* No mask_type should mean a loop-invariant predicate.
This is probably a subject for optimization in
if-conversion. */
if (!mask_type)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't compute mask type "
"for statement, ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, stmt,
0);
}
return false;
}
STMT_VINFO_VECTYPE (mask_producers[i]) = mask_type;
}
return true;
}
/* Function vect_is_simple_iv_evolution.
FORNOW: A simple evolution of an induction variable in the loop is
considered a polynomial evolution. */
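/* Illustration (not from the original source): for the canonical IV

     i_1 = PHI <0(preheader), i_2(latch)>
     i_2 = i_1 + 4;

   scev reports the chrec {0, +, 4}_loop, so *INIT = 0 and *STEP = 4.
   An evolution whose step is itself a chrec, e.g. {0, +, {1, +, 1}},
   is a polynomial of degree >= 2 and is rejected below.  */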
static bool
vect_is_simple_iv_evolution (unsigned loop_nb, tree access_fn, tree * init,
tree * step)
{
tree init_expr;
tree step_expr;
tree evolution_part = evolution_part_in_loop_num (access_fn, loop_nb);
basic_block bb;
/* When there is no evolution in this loop, the evolution function
is not "simple". */
if (evolution_part == NULL_TREE)
return false;
/* When the evolution is a polynomial of degree >= 2
the evolution function is not "simple". */
if (tree_is_chrec (evolution_part))
return false;
step_expr = evolution_part;
init_expr = unshare_expr (initial_condition_in_loop_num (access_fn, loop_nb));
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "step: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, step_expr);
dump_printf (MSG_NOTE, ", init: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, init_expr);
dump_printf (MSG_NOTE, "\n");
}
*init = init_expr;
*step = step_expr;
if (TREE_CODE (step_expr) != INTEGER_CST
&& (TREE_CODE (step_expr) != SSA_NAME
|| ((bb = gimple_bb (SSA_NAME_DEF_STMT (step_expr)))
&& flow_bb_inside_loop_p (get_loop (cfun, loop_nb), bb))
|| (!INTEGRAL_TYPE_P (TREE_TYPE (step_expr))
&& (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr))
|| !flag_associative_math)))
&& (TREE_CODE (step_expr) != REAL_CST
|| !flag_associative_math))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"step unknown.\n");
return false;
}
return true;
}
/* Function vect_analyze_scalar_cycles_1.
Examine the cross iteration def-use cycles of scalar variables
in LOOP. LOOP_VINFO represents the loop that is now being
considered for vectorization (can be LOOP, or an outer-loop
enclosing LOOP). */
static void
vect_analyze_scalar_cycles_1 (loop_vec_info loop_vinfo, struct loop *loop)
{
basic_block bb = loop->header;
tree init, step;
auto_vec<gimple *, 64> worklist;
gphi_iterator gsi;
bool double_reduc;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_scalar_cycles ===\n");
/* First - identify all inductions. Reduction detection assumes that all the
inductions have been identified; therefore, this order must not be
changed. */
for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
{
gphi *phi = gsi.phi ();
tree access_fn = NULL;
tree def = PHI_RESULT (phi);
stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
}
/* Skip virtual PHIs. The data dependences that are associated with
virtual defs/uses (i.e., memory accesses) are analyzed elsewhere. */
if (virtual_operand_p (def))
continue;
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_unknown_def_type;
/* Analyze the evolution function. */
access_fn = analyze_scalar_evolution (loop, def);
if (access_fn)
{
STRIP_NOPS (access_fn);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Access function of PHI: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, access_fn);
dump_printf (MSG_NOTE, "\n");
}
STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
= initial_condition_in_loop_num (access_fn, loop->num);
STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo)
= evolution_part_in_loop_num (access_fn, loop->num);
}
if (!access_fn
|| !vect_is_simple_iv_evolution (loop->num, access_fn, &init, &step)
|| (LOOP_VINFO_LOOP (loop_vinfo) != loop
&& TREE_CODE (step) != INTEGER_CST))
{
worklist.safe_push (phi);
continue;
}
gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo)
!= NULL_TREE);
gcc_assert (STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo) != NULL_TREE);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "Detected induction.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_induction_def;
}
/* Second - identify all reductions and nested cycles. */
while (worklist.length () > 0)
{
gimple *phi = worklist.pop ();
tree def = PHI_RESULT (phi);
stmt_vec_info stmt_vinfo = vinfo_for_stmt (phi);
gimple *reduc_stmt;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Analyze phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
}
gcc_assert (!virtual_operand_p (def)
&& STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_unknown_def_type);
reduc_stmt = vect_force_simple_reduction (loop_vinfo, phi,
&double_reduc, false);
if (reduc_stmt)
{
if (double_reduc)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected double reduction.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_double_reduction_def;
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
vect_double_reduction_def;
}
else
{
if (loop != LOOP_VINFO_LOOP (loop_vinfo))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected vectorizable nested cycle.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_nested_cycle;
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
vect_nested_cycle;
}
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Detected reduction.\n");
STMT_VINFO_DEF_TYPE (stmt_vinfo) = vect_reduction_def;
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (reduc_stmt)) =
vect_reduction_def;
/* Store the reduction cycles for possible vectorization in
loop-aware SLP if it was not detected as reduction
chain. */
if (! GROUP_FIRST_ELEMENT (vinfo_for_stmt (reduc_stmt)))
LOOP_VINFO_REDUCTIONS (loop_vinfo).safe_push (reduc_stmt);
}
}
}
else
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unknown def-use cycle pattern.\n");
}
}
/* Function vect_analyze_scalar_cycles.
Examine the cross iteration def-use cycles of scalar variables, by
analyzing the loop-header PHIs of scalar variables. Classify each
cycle as one of the following: invariant, induction, reduction, unknown.
We do that for the loop represented by LOOP_VINFO, and also for its
inner loop, if it exists.
Examples for scalar cycles:
Example1: reduction:
loop1:
for (i=0; i<N; i++)
sum += a[i];
Example2: induction:
loop2:
for (i=0; i<N; i++)
a[i] = i; */
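/* Illustration (not from the original source) of a cycle left as
   unknown: in

     for (i=0; i<N; i++)
       x = x * x + a[i];

   x is neither a simple induction (its evolution is not polynomial)
   nor a supported reduction (the phi result is used more than once in
   the cycle), so vect_force_simple_reduction fails and the dump below
   reports an unknown def-use cycle pattern.  */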
static void
vect_analyze_scalar_cycles (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
vect_analyze_scalar_cycles_1 (loop_vinfo, loop);
/* When vectorizing an outer-loop, the inner-loop is executed sequentially.
Reductions in such an inner loop therefore have different properties than
the reductions in the nest that gets vectorized:
1. When vectorized, they are executed in the same order as in the original
scalar loop, so we can't change the order of computation when
vectorizing them.
2. FIXME: Inner-loop reductions can be used in the inner-loop, so the
current checks are too strict. */
if (loop->inner)
vect_analyze_scalar_cycles_1 (loop_vinfo, loop->inner);
}
/* Transfer group and reduction information from STMT to its pattern stmt. */
static void
vect_fixup_reduc_chain (gimple *stmt)
{
gimple *firstp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
gimple *stmtp;
gcc_assert (!GROUP_FIRST_ELEMENT (vinfo_for_stmt (firstp))
&& GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
GROUP_SIZE (vinfo_for_stmt (firstp)) = GROUP_SIZE (vinfo_for_stmt (stmt));
do
{
stmtp = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmtp)) = firstp;
stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmt));
if (stmt)
GROUP_NEXT_ELEMENT (vinfo_for_stmt (stmtp))
= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
}
while (stmt);
STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmtp)) = vect_reduction_def;
}
/* Fixup scalar cycles that now have their stmts detected as patterns. */
static void
vect_fixup_scalar_cycles_with_patterns (loop_vec_info loop_vinfo)
{
gimple *first;
unsigned i;
FOR_EACH_VEC_ELT (LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo), i, first)
if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (first)))
{
gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
while (next)
{
if (! STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (next)))
break;
next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next));
}
/* If not all stmts in the chain are patterns, try to handle
the chain without patterns. */
if (! next)
{
vect_fixup_reduc_chain (first);
LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo)[i]
= STMT_VINFO_RELATED_STMT (vinfo_for_stmt (first));
}
}
}
/* Function vect_get_loop_niters.
Determine how many iterations the loop is executed and place it
in NUMBER_OF_ITERATIONS. Place the number of latch iterations
in NUMBER_OF_ITERATIONSM1. Place the condition under which the
niter information holds in ASSUMPTIONS.
Return the loop exit condition. */
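/* Illustration (not from the original source): for

     n = 0;
     do { ...; n++; } while (n != 3);

   the latch runs 2 times and the header 3 times, so
   *NUMBER_OF_ITERATIONSM1 is 2 and *NUMBER_OF_ITERATIONS is 3; the
   PLUS_EXPR at the end of this function adds the extra 1.  */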
static gcond *
vect_get_loop_niters (struct loop *loop, tree *assumptions,
tree *number_of_iterations, tree *number_of_iterationsm1)
{
edge exit = single_exit (loop);
struct tree_niter_desc niter_desc;
tree niter_assumptions, niter, may_be_zero;
gcond *cond = get_loop_exit_condition (loop);
*assumptions = boolean_true_node;
*number_of_iterationsm1 = chrec_dont_know;
*number_of_iterations = chrec_dont_know;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== get_loop_niters ===\n");
if (!exit)
return cond;
niter = chrec_dont_know;
may_be_zero = NULL_TREE;
niter_assumptions = boolean_true_node;
if (!number_of_iterations_exit_assumptions (loop, exit, &niter_desc, NULL)
|| chrec_contains_undetermined (niter_desc.niter))
return cond;
niter_assumptions = niter_desc.assumptions;
may_be_zero = niter_desc.may_be_zero;
niter = niter_desc.niter;
if (may_be_zero && integer_zerop (may_be_zero))
may_be_zero = NULL_TREE;
if (may_be_zero)
{
if (COMPARISON_CLASS_P (may_be_zero))
{
/* Try to combine may_be_zero with assumptions, this can simplify
computation of niter expression. */
if (niter_assumptions && !integer_nonzerop (niter_assumptions))
niter_assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
niter_assumptions,
fold_build1 (TRUTH_NOT_EXPR,
boolean_type_node,
may_be_zero));
else
niter = fold_build3 (COND_EXPR, TREE_TYPE (niter), may_be_zero,
build_int_cst (TREE_TYPE (niter), 0),
rewrite_to_non_trapping_overflow (niter));
may_be_zero = NULL_TREE;
}
else if (integer_nonzerop (may_be_zero))
{
*number_of_iterationsm1 = build_int_cst (TREE_TYPE (niter), 0);
*number_of_iterations = build_int_cst (TREE_TYPE (niter), 1);
return cond;
}
else
return cond;
}
*assumptions = niter_assumptions;
*number_of_iterationsm1 = niter;
/* We want the number of loop header executions, which is the number
of latch executions plus one.
??? For UINT_MAX latch executions this number overflows to zero
for loops like do { n++; } while (n != 0); */
if (niter && !chrec_contains_undetermined (niter))
niter = fold_build2 (PLUS_EXPR, TREE_TYPE (niter), unshare_expr (niter),
build_int_cst (TREE_TYPE (niter), 1));
*number_of_iterations = niter;
return cond;
}
/* Function bb_in_loop_p
Used as a predicate for dfs order traversal of the loop bbs. */
static bool
bb_in_loop_p (const_basic_block bb, const void *data)
{
const struct loop *const loop = (const struct loop *)data;
if (flow_bb_inside_loop_p (loop, bb))
return true;
return false;
}
/* Create and initialize a new loop_vec_info struct for LOOP_IN, as well as
stmt_vec_info structs for all the stmts in LOOP_IN. */
_loop_vec_info::_loop_vec_info (struct loop *loop_in)
: vec_info (vec_info::loop, init_cost (loop_in)),
loop (loop_in),
bbs (XCNEWVEC (basic_block, loop->num_nodes)),
num_itersm1 (NULL_TREE),
num_iters (NULL_TREE),
num_iters_unchanged (NULL_TREE),
num_iters_assumptions (NULL_TREE),
th (0),
versioning_threshold (0),
vectorization_factor (0),
max_vectorization_factor (0),
mask_skip_niters (NULL_TREE),
mask_compare_type (NULL_TREE),
unaligned_dr (NULL),
peeling_for_alignment (0),
ptr_mask (0),
ivexpr_map (NULL),
slp_unrolling_factor (1),
single_scalar_iteration_cost (0),
vectorizable (false),
can_fully_mask_p (true),
fully_masked_p (false),
peeling_for_gaps (false),
peeling_for_niter (false),
operands_swapped (false),
no_data_dependencies (false),
has_mask_store (false),
scalar_loop (NULL),
orig_loop_info (NULL)
{
/* Create/Update stmt_info for all stmts in the loop. */
basic_block *body = get_loop_body (loop);
for (unsigned int i = 0; i < loop->num_nodes; i++)
{
basic_block bb = body[i];
gimple_stmt_iterator si;
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
{
gimple *phi = gsi_stmt (si);
gimple_set_uid (phi, 0);
set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, this));
}
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
{
gimple *stmt = gsi_stmt (si);
gimple_set_uid (stmt, 0);
set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, this));
}
}
free (body);
/* CHECKME: We want to visit all BBs before their successors (except for
latch blocks, for which this assertion wouldn't hold). In the simple
case of the loop forms we allow, a dfs order of the BBs would be the same
as reversed postorder traversal, so we are safe. */
unsigned int nbbs = dfs_enumerate_from (loop->header, 0, bb_in_loop_p,
bbs, loop->num_nodes, loop);
gcc_assert (nbbs == loop->num_nodes);
}
/* Free all levels of MASKS. */
void
release_vec_loop_masks (vec_loop_masks *masks)
{
rgroup_masks *rgm;
unsigned int i;
FOR_EACH_VEC_ELT (*masks, i, rgm)
rgm->masks.release ();
masks->release ();
}
/* Free all memory used by the _loop_vec_info, as well as all the
stmt_vec_info structs of all the stmts in the loop. */
_loop_vec_info::~_loop_vec_info ()
{
int nbbs;
gimple_stmt_iterator si;
int j;
nbbs = loop->num_nodes;
for (j = 0; j < nbbs; j++)
{
basic_block bb = bbs[j];
for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si))
free_stmt_vec_info (gsi_stmt (si));
for (si = gsi_start_bb (bb); !gsi_end_p (si); )
{
gimple *stmt = gsi_stmt (si);
/* We may have broken canonical form by moving a constant
into RHS1 of a commutative op. Fix such occurrences. */
if (operands_swapped && is_gimple_assign (stmt))
{
enum tree_code code = gimple_assign_rhs_code (stmt);
if ((code == PLUS_EXPR
|| code == POINTER_PLUS_EXPR
|| code == MULT_EXPR)
&& CONSTANT_CLASS_P (gimple_assign_rhs1 (stmt)))
swap_ssa_operands (stmt,
gimple_assign_rhs1_ptr (stmt),
gimple_assign_rhs2_ptr (stmt));
else if (code == COND_EXPR
&& CONSTANT_CLASS_P (gimple_assign_rhs2 (stmt)))
{
tree cond_expr = gimple_assign_rhs1 (stmt);
enum tree_code cond_code = TREE_CODE (cond_expr);
if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
{
bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr,
0));
cond_code = invert_tree_comparison (cond_code,
honor_nans);
if (cond_code != ERROR_MARK)
{
TREE_SET_CODE (cond_expr, cond_code);
swap_ssa_operands (stmt,
gimple_assign_rhs2_ptr (stmt),
gimple_assign_rhs3_ptr (stmt));
}
}
}
}
/* Free stmt_vec_info. */
free_stmt_vec_info (stmt);
gsi_next (&si);
}
}
free (bbs);
release_vec_loop_masks (&masks);
delete ivexpr_map;
loop->aux = NULL;
}
/* Return an invariant or register for EXPR and emit necessary
computations in the LOOP_VINFO loop preheader. */
tree
cse_and_gimplify_to_preheader (loop_vec_info loop_vinfo, tree expr)
{
if (is_gimple_reg (expr)
|| is_gimple_min_invariant (expr))
return expr;
if (! loop_vinfo->ivexpr_map)
loop_vinfo->ivexpr_map = new hash_map<tree_operand_hash, tree>;
tree &cached = loop_vinfo->ivexpr_map->get_or_insert (expr);
if (! cached)
{
gimple_seq stmts = NULL;
cached = force_gimple_operand (unshare_expr (expr),
&stmts, true, NULL_TREE);
if (stmts)
{
edge e = loop_preheader_edge (LOOP_VINFO_LOOP (loop_vinfo));
gsi_insert_seq_on_edge_immediate (e, stmts);
}
}
return cached;
}
/* Return true if we can use CMP_TYPE as the comparison type to produce
all masks required to mask LOOP_VINFO. */
static bool
can_produce_all_loop_masks_p (loop_vec_info loop_vinfo, tree cmp_type)
{
rgroup_masks *rgm;
unsigned int i;
FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
if (rgm->mask_type != NULL_TREE
&& !direct_internal_fn_supported_p (IFN_WHILE_ULT,
cmp_type, rgm->mask_type,
OPTIMIZE_FOR_SPEED))
return false;
return true;
}
/* Calculate the maximum number of scalars per iteration for every
rgroup in LOOP_VINFO. */
static unsigned int
vect_get_max_nscalars_per_iter (loop_vec_info loop_vinfo)
{
unsigned int res = 1;
unsigned int i;
rgroup_masks *rgm;
FOR_EACH_VEC_ELT (LOOP_VINFO_MASKS (loop_vinfo), i, rgm)
res = MAX (res, rgm->max_nscalars_per_iter);
return res;
}
/* Each statement in LOOP_VINFO can be masked where necessary. Check
whether we can actually generate the masks required. Return true if so,
storing the type of the scalar IV in LOOP_VINFO_MASK_COMPARE_TYPE. */
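/* Illustration (not from the original source; numbers assumed): with
   VF = 4 and 6 scalar iterations, a fully-masked loop runs twice with
   masks {1,1,1,1} and {1,1,0,0}, each produced by WHILE_ULT (i, 6)
   for i = 0 and i = 4 in the chosen comparison type.  */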
static bool
vect_verify_full_masking (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned int min_ni_width;
/* Use a normal loop if there are no statements that need masking.
This only happens in rare degenerate cases: it means that the loop
has no loads, no stores, and no live-out values. */
if (LOOP_VINFO_MASKS (loop_vinfo).is_empty ())
return false;
/* Get the maximum number of iterations that is representable
in the counter type. */
tree ni_type = TREE_TYPE (LOOP_VINFO_NITERSM1 (loop_vinfo));
widest_int max_ni = wi::to_widest (TYPE_MAX_VALUE (ni_type)) + 1;
/* Get a more refined estimate for the number of iterations. */
widest_int max_back_edges;
if (max_loop_iterations (loop, &max_back_edges))
max_ni = wi::smin (max_ni, max_back_edges + 1);
/* Account for rgroup masks, in which each bit is replicated N times. */
max_ni *= vect_get_max_nscalars_per_iter (loop_vinfo);
/* Work out how many bits we need to represent the limit. */
min_ni_width = wi::min_precision (max_ni, UNSIGNED);
/* Find a scalar mode for which WHILE_ULT is supported. */
opt_scalar_int_mode cmp_mode_iter;
tree cmp_type = NULL_TREE;
FOR_EACH_MODE_IN_CLASS (cmp_mode_iter, MODE_INT)
{
unsigned int cmp_bits = GET_MODE_BITSIZE (cmp_mode_iter.require ());
if (cmp_bits >= min_ni_width
&& targetm.scalar_mode_supported_p (cmp_mode_iter.require ()))
{
tree this_type = build_nonstandard_integer_type (cmp_bits, true);
if (this_type
&& can_produce_all_loop_masks_p (loop_vinfo, this_type))
{
/* Although we could stop as soon as we find a valid mode,
it's often better to continue until we hit Pmode, since the
operands to the WHILE are more likely to be reusable in
address calculations. */
cmp_type = this_type;
if (cmp_bits >= GET_MODE_BITSIZE (Pmode))
break;
}
}
}
if (!cmp_type)
return false;
LOOP_VINFO_MASK_COMPARE_TYPE (loop_vinfo) = cmp_type;
return true;
}
/* Calculate the cost of one scalar iteration of the loop. */
static void
vect_compute_single_scalar_iteration_cost (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes, factor;
int innerloop_iters, i;
/* Gather costs for statements in the scalar loop. */
/* FORNOW. */
innerloop_iters = 1;
if (loop->inner)
innerloop_iters = 50; /* FIXME */
for (i = 0; i < nbbs; i++)
{
gimple_stmt_iterator si;
basic_block bb = bbs[i];
if (bb->loop_father == loop->inner)
factor = innerloop_iters;
else
factor = 1;
for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
{
gimple *stmt = gsi_stmt (si);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
if (!is_gimple_assign (stmt) && !is_gimple_call (stmt))
continue;
/* Skip stmts that are not vectorized inside the loop. */
if (stmt_info
&& !STMT_VINFO_RELEVANT_P (stmt_info)
&& (!STMT_VINFO_LIVE_P (stmt_info)
|| !VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
&& !STMT_VINFO_IN_PATTERN_P (stmt_info))
continue;
vect_cost_for_stmt kind;
if (STMT_VINFO_DATA_REF (stmt_info))
{
if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
kind = scalar_load;
else
kind = scalar_store;
}
else
kind = scalar_stmt;
record_stmt_cost (&LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
factor, kind, stmt_info, 0, vect_prologue);
}
}
/* Now accumulate cost. */
void *target_cost_data = init_cost (loop);
stmt_info_for_cost *si;
int j;
FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
j, si)
{
struct _stmt_vec_info *stmt_info
= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
(void) add_stmt_cost (target_cost_data, si->count,
si->kind, stmt_info, si->misalign,
vect_body);
}
unsigned dummy, body_cost = 0;
finish_cost (target_cost_data, &dummy, &body_cost, &dummy);
destroy_cost_data (target_cost_data);
LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo) = body_cost;
}
/* Function vect_analyze_loop_form_1.
Verify that certain CFG restrictions hold, including:
- the loop has a pre-header
- the loop has a single entry and exit
- the loop exit condition is simple enough
- the number of iterations can be analyzed, i.e., a countable loop. The
niter could be analyzed under some assumptions. */
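/* Illustration (not from the original source): the inner-most form
   accepted below corresponds to a rotated source loop such as

     do { a[i] = b[i]; i++; } while (i < n);

   one header holding all executable stmts, an empty latch, and a
   single exit.  Any control flow in the body (an if/else, say) adds
   blocks, making loop->num_nodes != 2, and is rejected.  */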
bool
vect_analyze_loop_form_1 (struct loop *loop, gcond **loop_cond,
tree *assumptions, tree *number_of_iterationsm1,
tree *number_of_iterations, gcond **inner_loop_cond)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_loop_form ===\n");
/* Different restrictions apply when we are considering an inner-most loop,
vs. an outer (nested) loop.
(FORNOW. May want to relax some of these restrictions in the future). */
if (!loop->inner)
{
/* Inner-most loop. We currently require that the number of BBs is
exactly 2 (the header and latch). Vectorizable inner-most loops
look like this:
(pre-header)
|
header <--------+
| | |
| +--> latch --+
|
(exit-bb) */
if (loop->num_nodes != 2)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: control flow in loop.\n");
return false;
}
if (empty_block_p (loop->header))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: empty loop.\n");
return false;
}
}
else
{
struct loop *innerloop = loop->inner;
edge entryedge;
/* Nested loop. We currently require that the loop is doubly-nested,
contains a single inner loop, and the number of BBs is exactly 5.
Vectorizable outer-loops look like this:
(pre-header)
|
header <---+
| |
inner-loop |
| |
tail ------+
|
(exit-bb)
The inner-loop has the properties expected of inner-most loops
as described above. */
if ((loop->inner)->inner || (loop->inner)->next)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: multiple nested loops.\n");
return false;
}
if (loop->num_nodes != 5)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: control flow in loop.\n");
return false;
}
entryedge = loop_preheader_edge (innerloop);
if (entryedge->src != loop->header
|| !single_exit (innerloop)
|| single_exit (innerloop)->dest != EDGE_PRED (loop->latch, 0)->src)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: unsupported outerloop form.\n");
return false;
}
/* Analyze the inner-loop. */
tree inner_niterm1, inner_niter, inner_assumptions;
if (! vect_analyze_loop_form_1 (loop->inner, inner_loop_cond,
&inner_assumptions, &inner_niterm1,
&inner_niter, NULL)
/* Don't support analyzing niter under assumptions for the inner
loop. */
|| !integer_onep (inner_assumptions))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: Bad inner loop.\n");
return false;
}
if (!expr_invariant_in_loop_p (loop, inner_niter))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: inner-loop count not"
" invariant.\n");
return false;
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Considering outer-loop vectorization.\n");
}
if (!single_exit (loop)
|| EDGE_COUNT (loop->header->preds) != 2)
{
if (dump_enabled_p ())
{
if (!single_exit (loop))
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: multiple exits.\n");
else if (EDGE_COUNT (loop->header->preds) != 2)
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: too many incoming edges.\n");
}
return false;
}
/* We assume that the loop exit condition is at the end of the loop, i.e.,
that the loop is represented as a do-while (with a proper if-guard
before the loop if needed), where the loop header contains all the
executable statements, and the latch is empty. */
if (!empty_block_p (loop->latch)
|| !gimple_seq_empty_p (phi_nodes (loop->latch)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: latch block not empty.\n");
return false;
}
/* Make sure the exit is not abnormal. */
edge e = single_exit (loop);
if (e->flags & EDGE_ABNORMAL)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: abnormal loop exit edge.\n");
return false;
}
*loop_cond = vect_get_loop_niters (loop, assumptions, number_of_iterations,
number_of_iterationsm1);
if (!*loop_cond)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: complicated exit condition.\n");
return false;
}
if (integer_zerop (*assumptions)
|| !*number_of_iterations
|| chrec_contains_undetermined (*number_of_iterations))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: number of iterations cannot be "
"computed.\n");
return false;
}
if (integer_zerop (*number_of_iterations))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: number of iterations = 0.\n");
return false;
}
return true;
}
/* Analyze LOOP form and return a loop_vec_info if it is of suitable form. */
loop_vec_info
vect_analyze_loop_form (struct loop *loop)
{
tree assumptions, number_of_iterations, number_of_iterationsm1;
gcond *loop_cond, *inner_loop_cond = NULL;
if (! vect_analyze_loop_form_1 (loop, &loop_cond,
&assumptions, &number_of_iterationsm1,
&number_of_iterations, &inner_loop_cond))
return NULL;
loop_vec_info loop_vinfo = new _loop_vec_info (loop);
LOOP_VINFO_NITERSM1 (loop_vinfo) = number_of_iterationsm1;
LOOP_VINFO_NITERS (loop_vinfo) = number_of_iterations;
LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = number_of_iterations;
if (!integer_onep (assumptions))
{
/* We consider vectorizing this loop by versioning it under
some assumptions. In order to do this, we need to clear
existing information computed by scev and niter analyzer. */
scev_reset_htab ();
free_numbers_of_iterations_estimates (loop);
/* Also set flag for this loop so that following scev and niter
analysis are done under the assumptions. */
loop_constraint_set (loop, LOOP_C_FINITE);
/* Also record the assumptions for versioning. */
LOOP_VINFO_NITERS_ASSUMPTIONS (loop_vinfo) = assumptions;
}
if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Symbolic number of iterations is ");
dump_generic_expr (MSG_NOTE, TDF_DETAILS, number_of_iterations);
dump_printf (MSG_NOTE, "\n");
}
}
STMT_VINFO_TYPE (vinfo_for_stmt (loop_cond)) = loop_exit_ctrl_vec_info_type;
if (inner_loop_cond)
STMT_VINFO_TYPE (vinfo_for_stmt (inner_loop_cond))
= loop_exit_ctrl_vec_info_type;
gcc_assert (!loop->aux);
loop->aux = loop_vinfo;
return loop_vinfo;
}
/* Scan the loop stmts and, depending on whether there are any (non-)SLP
statements, update the vectorization factor. */
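/* Illustration (not from the original source; numbers assumed): with a
   loop VF of 4 and an SLP unrolling factor of 6, the two factors must
   have a common multiple, so force_common_multiple yields an updated
   VF of 12.  */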
static void
vect_update_vf_for_slp (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
poly_uint64 vectorization_factor;
int i;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_update_vf_for_slp ===\n");
vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
gcc_assert (known_ne (vectorization_factor, 0U));
/* If all the stmts in the loop can be SLPed, we perform only SLP, and
the vectorization factor of the loop is the unrolling factor required by
the SLP instances. If that unrolling factor is 1, we say that we
perform pure SLP on the loop; cross-iteration parallelism is not
exploited. */
bool only_slp_in_loop = true;
for (i = 0; i < nbbs; i++)
{
basic_block bb = bbs[i];
for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
gsi_next (&si))
{
gimple *stmt = gsi_stmt (si);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
if (STMT_VINFO_IN_PATTERN_P (stmt_info)
&& STMT_VINFO_RELATED_STMT (stmt_info))
{
stmt = STMT_VINFO_RELATED_STMT (stmt_info);
stmt_info = vinfo_for_stmt (stmt);
}
if ((STMT_VINFO_RELEVANT_P (stmt_info)
|| VECTORIZABLE_CYCLE_DEF (STMT_VINFO_DEF_TYPE (stmt_info)))
&& !PURE_SLP_STMT (stmt_info))
/* STMT needs both SLP and loop-based vectorization. */
only_slp_in_loop = false;
}
}
if (only_slp_in_loop)
{
dump_printf_loc (MSG_NOTE, vect_location,
"Loop contains only SLP stmts\n");
vectorization_factor = LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo);
}
else
{
dump_printf_loc (MSG_NOTE, vect_location,
"Loop contains SLP and non-SLP stmts\n");
/* Both the vectorization factor and unroll factor have the form
current_vector_size * X for some rational X, so they must have
a common multiple. */
vectorization_factor
= force_common_multiple (vectorization_factor,
LOOP_VINFO_SLP_UNROLLING_FACTOR (loop_vinfo));
}
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = vectorization_factor;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Updating vectorization factor to ");
dump_dec (MSG_NOTE, vectorization_factor);
dump_printf (MSG_NOTE, ".\n");
}
}
/* Return true if STMT_INFO describes a double reduction phi and if
the other phi in the reduction is also relevant for vectorization.
This rejects cases such as:
outer1:
x_1 = PHI <x_3(outer2), ...>;
...
inner:
x_2 = ...;
...
outer2:
x_3 = PHI <x_2(inner)>;
if nothing in x_2 or elsewhere makes x_1 relevant. */
static bool
vect_active_double_reduction_p (stmt_vec_info stmt_info)
{
if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_double_reduction_def)
return false;
gimple *other_phi = STMT_VINFO_REDUC_DEF (stmt_info);
return STMT_VINFO_RELEVANT_P (vinfo_for_stmt (other_phi));
}
/* Function vect_analyze_loop_operations.
Scan the loop stmts and make sure they are all vectorizable. */
static bool
vect_analyze_loop_operations (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
int i;
stmt_vec_info stmt_info;
bool need_to_vectorize = false;
bool ok;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vect_analyze_loop_operations ===\n");
for (i = 0; i < nbbs; i++)
{
basic_block bb = bbs[i];
for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
gsi_next (&si))
{
gphi *phi = si.phi ();
ok = true;
stmt_info = vinfo_for_stmt (phi);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "examining phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
}
if (virtual_operand_p (gimple_phi_result (phi)))
continue;
/* Inner-loop loop-closed exit phi in outer-loop vectorization
(i.e., a phi in the tail of the outer-loop). */
if (! is_loop_header_bb_p (bb))
{
/* FORNOW: we currently don't support the case that these phis
are not used in the outer loop (unless it is a double reduction,
i.e., this phi is vect_reduction_def), because this case
requires us to actually do something here. */
if (STMT_VINFO_LIVE_P (stmt_info)
&& !vect_active_double_reduction_p (stmt_info))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Unsupported loop-closed phi in "
"outer-loop.\n");
return false;
}
/* If PHI is used in the outer loop, we check that its operand
is defined in the inner loop. */
if (STMT_VINFO_RELEVANT_P (stmt_info))
{
tree phi_op;
gimple *op_def_stmt;
if (gimple_phi_num_args (phi) != 1)
return false;
phi_op = PHI_ARG_DEF (phi, 0);
if (TREE_CODE (phi_op) != SSA_NAME)
return false;
op_def_stmt = SSA_NAME_DEF_STMT (phi_op);
if (gimple_nop_p (op_def_stmt)
|| !flow_bb_inside_loop_p (loop, gimple_bb (op_def_stmt))
|| !vinfo_for_stmt (op_def_stmt))
return false;
if (STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
!= vect_used_in_outer
&& STMT_VINFO_RELEVANT (vinfo_for_stmt (op_def_stmt))
!= vect_used_in_outer_by_reduction)
return false;
}
continue;
}
gcc_assert (stmt_info);
if ((STMT_VINFO_RELEVANT (stmt_info) == vect_used_in_scope
|| STMT_VINFO_LIVE_P (stmt_info))
&& STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
{
/* A scalar-dependence cycle that we don't support. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: scalar dependence cycle.\n");
return false;
}
if (STMT_VINFO_RELEVANT_P (stmt_info))
{
need_to_vectorize = true;
if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
&& ! PURE_SLP_STMT (stmt_info))
ok = vectorizable_induction (phi, NULL, NULL, NULL);
else if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
&& ! PURE_SLP_STMT (stmt_info))
ok = vectorizable_reduction (phi, NULL, NULL, NULL, NULL);
}
/* SLP PHIs are tested by vect_slp_analyze_node_operations. */
if (ok
&& STMT_VINFO_LIVE_P (stmt_info)
&& !PURE_SLP_STMT (stmt_info))
ok = vectorizable_live_operation (phi, NULL, NULL, -1, NULL);
if (!ok)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: relevant phi not "
"supported: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, phi, 0);
}
return false;
}
}
for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
gsi_next (&si))
{
gimple *stmt = gsi_stmt (si);
if (!gimple_clobber_p (stmt)
&& !vect_analyze_stmt (stmt, &need_to_vectorize, NULL, NULL))
return false;
}
} /* bbs */
/* All operations in the loop are either irrelevant (they deal with loop
control, or are dead), or only used outside the loop and can be moved
out of the loop (e.g. invariants, inductions). The loop can be
optimized away by scalar optimizations. We're better off not
touching this loop. */
if (!need_to_vectorize)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"All the computation can be taken out of the loop.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: redundant loop. no profit to "
"vectorize.\n");
return false;
}
return true;
}
/* Analyze the cost of the loop described by LOOP_VINFO. Decide if it
is worthwhile to vectorize. Return 1 if definitely yes, 0 if
definitely no, or -1 if it's worth retrying. */
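/* Illustration (not from the original source; numbers assumed): with
   assumed_vf = 4, PARAM_MIN_VECT_LOOP_BOUND = 2 and
   min_profitable_iters = 10, the threshold computed below is
   th = MAX (2 * 4, 10) = 10, so a known trip count of 8 is rejected
   as unprofitable.  */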
static int
vect_analyze_loop_costing (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
/* Only fully-masked loops can have iteration counts less than the
vectorization factor. */
if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
{
HOST_WIDE_INT max_niter;
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
max_niter = LOOP_VINFO_INT_NITERS (loop_vinfo);
else
max_niter = max_stmt_executions_int (loop);
if (max_niter != -1
&& (unsigned HOST_WIDE_INT) max_niter < assumed_vf)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: iteration count smaller than "
"vectorization factor.\n");
return 0;
}
}
int min_profitable_iters, min_profitable_estimate;
vect_estimate_min_profitable_iters (loop_vinfo, &min_profitable_iters,
&min_profitable_estimate);
if (min_profitable_iters < 0)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization not profitable.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vector version will never be "
"profitable.\n");
return -1;
}
int min_scalar_loop_bound = (PARAM_VALUE (PARAM_MIN_VECT_LOOP_BOUND)
* assumed_vf);
/* Use the cost model only if it is more conservative than the
user-specified threshold. */
unsigned int th = (unsigned) MAX (min_scalar_loop_bound,
min_profitable_iters);
LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = th;
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& LOOP_VINFO_INT_NITERS (loop_vinfo) < th)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: vectorization not profitable.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"not vectorized: iteration count smaller than user "
"specified loop bound parameter or minimum profitable "
"iterations (whichever is more conservative).\n");
return 0;
}
HOST_WIDE_INT estimated_niter = estimated_stmt_executions_int (loop);
if (estimated_niter == -1)
estimated_niter = likely_max_stmt_executions_int (loop);
if (estimated_niter != -1
&& ((unsigned HOST_WIDE_INT) estimated_niter
< MAX (th, (unsigned) min_profitable_estimate)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: estimated iteration count too "
"small.\n");
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"not vectorized: estimated iteration count smaller "
"than specified loop bound parameter or minimum "
"profitable iterations (whichever is more "
"conservative).\n");
return -1;
}
return 1;
}
/* Function vect_analyze_loop_2.
Apply a set of analyses on LOOP, and create a loop_vec_info struct
for it. The different analyses will record information in the
loop_vec_info struct. */
static bool
vect_analyze_loop_2 (loop_vec_info loop_vinfo, bool &fatal)
{
bool ok;
int res;
unsigned int max_vf = MAX_VECTORIZATION_FACTOR;
poly_uint64 min_vf = 2;
unsigned int n_stmts = 0;
/* The first group of checks is independent of the vector size. */
fatal = true;
/* Find all data references in the loop (which correspond to vdefs/vuses)
and analyze their evolution in the loop. */
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
loop_p loop = LOOP_VINFO_LOOP (loop_vinfo);
if (!find_loop_nest (loop, &LOOP_VINFO_LOOP_NEST (loop_vinfo)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: loop nest containing two "
"or more consecutive inner loops cannot be "
"vectorized\n");
return false;
}
for (unsigned i = 0; i < loop->num_nodes; i++)
for (gimple_stmt_iterator gsi = gsi_start_bb (bbs[i]);
!gsi_end_p (gsi); gsi_next (&gsi))
{
gimple *stmt = gsi_stmt (gsi);
if (is_gimple_debug (stmt))
continue;
++n_stmts;
if (!find_data_references_in_stmt (loop, stmt,
&LOOP_VINFO_DATAREFS (loop_vinfo)))
{
if (is_gimple_call (stmt) && loop->safelen)
{
tree fndecl = gimple_call_fndecl (stmt), op;
if (fndecl != NULL_TREE)
{
cgraph_node *node = cgraph_node::get (fndecl);
if (node != NULL && node->simd_clones != NULL)
{
unsigned int j, n = gimple_call_num_args (stmt);
for (j = 0; j < n; j++)
{
op = gimple_call_arg (stmt, j);
if (DECL_P (op)
|| (REFERENCE_CLASS_P (op)
&& get_base_address (op)))
break;
}
op = gimple_call_lhs (stmt);
/* Ignore #pragma omp declare simd functions
if they don't have data references in the
call stmt itself. */
if (j == n
&& !(op
&& (DECL_P (op)
|| (REFERENCE_CLASS_P (op)
&& get_base_address (op)))))
continue;
}
}
}
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: loop contains function "
"calls or data references that cannot "
"be analyzed\n");
return false;
}
}
/* Analyze the data references and also adjust the minimal
vectorization factor according to the loads and stores. */
ok = vect_analyze_data_refs (loop_vinfo, &min_vf);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data references.\n");
return false;
}
/* Classify all cross-iteration scalar data-flow cycles.
Cross-iteration cycles caused by virtual phis are analyzed separately. */
vect_analyze_scalar_cycles (loop_vinfo);
vect_pattern_recog (loop_vinfo);
vect_fixup_scalar_cycles_with_patterns (loop_vinfo);
/* Analyze the access patterns of the data-refs in the loop (consecutive,
complex, etc.). FORNOW: Only handle consecutive access pattern. */
ok = vect_analyze_data_ref_accesses (loop_vinfo);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data access.\n");
return false;
}
/* Data-flow analysis to detect stmts that do not need to be vectorized. */
ok = vect_mark_stmts_to_be_vectorized (loop_vinfo);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unexpected pattern.\n");
return false;
}
/* The rest of the analysis below depends on the vector size in some way. */
fatal = false;
/* Analyze data dependences between the data-refs in the loop
and adjust the maximum vectorization factor according to
the dependences.
FORNOW: fail at the first data dependence that we encounter. */
ok = vect_analyze_data_ref_dependences (loop_vinfo, &max_vf);
if (!ok
|| (max_vf != MAX_VECTORIZATION_FACTOR
&& maybe_lt (max_vf, min_vf)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data dependence.\n");
return false;
}
LOOP_VINFO_MAX_VECT_FACTOR (loop_vinfo) = max_vf;
ok = vect_determine_vectorization_factor (loop_vinfo);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't determine vectorization factor.\n");
return false;
}
if (max_vf != MAX_VECTORIZATION_FACTOR
&& maybe_lt (max_vf, LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data dependence.\n");
return false;
}
/* Compute the scalar iteration cost. */
vect_compute_single_scalar_iteration_cost (loop_vinfo);
poly_uint64 saved_vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
unsigned th;
/* Check the SLP opportunities in the loop, analyze and build SLP trees. */
ok = vect_analyze_slp (loop_vinfo, n_stmts);
if (!ok)
return false;
/* If there are any SLP instances mark them as pure_slp. */
bool slp = vect_make_slp_decision (loop_vinfo);
if (slp)
{
/* Find stmts that need to be both vectorized and SLPed. */
vect_detect_hybrid_slp (loop_vinfo);
/* Update the vectorization factor based on the SLP decision. */
vect_update_vf_for_slp (loop_vinfo);
}
bool saved_can_fully_mask_p = LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo);
/* We don't expect to have to roll back to anything other than an empty
set of rgroups. */
gcc_assert (LOOP_VINFO_MASKS (loop_vinfo).is_empty ());
/* This is the point where we can re-start analysis with SLP forced off. */
start_over:
/* Now the vectorization factor is final. */
poly_uint64 vectorization_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
gcc_assert (known_ne (vectorization_factor, 0U));
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo) && dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"vectorization_factor = ");
dump_dec (MSG_NOTE, vectorization_factor);
dump_printf (MSG_NOTE, ", niters = " HOST_WIDE_INT_PRINT_DEC "\n",
LOOP_VINFO_INT_NITERS (loop_vinfo));
}
HOST_WIDE_INT max_niter
= likely_max_stmt_executions_int (LOOP_VINFO_LOOP (loop_vinfo));
/* Analyze the alignment of the data-refs in the loop.
Fail if a data reference is found that cannot be vectorized. */
ok = vect_analyze_data_refs_alignment (loop_vinfo);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data alignment.\n");
return false;
}
/* Prune the list of ddrs to be tested at run-time by versioning for alias.
It is important to call pruning after vect_analyze_data_ref_accesses,
since we use grouping information gathered by interleaving analysis. */
ok = vect_prune_runtime_alias_test_list (loop_vinfo);
if (!ok)
return false;
/* Do not invoke vect_enhance_data_refs_alignment for epilogue
vectorization. */
if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
{
/* This pass will decide on using loop versioning and/or loop peeling in
order to enhance the alignment of data references in the loop. */
ok = vect_enhance_data_refs_alignment (loop_vinfo);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad data alignment.\n");
return false;
}
}
if (slp)
{
/* Analyze operations in the SLP instances. Note this may
remove unsupported SLP instances which makes the above
SLP kind detection invalid. */
unsigned old_size = LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length ();
vect_slp_analyze_operations (loop_vinfo);
if (LOOP_VINFO_SLP_INSTANCES (loop_vinfo).length () != old_size)
goto again;
}
/* Scan all the remaining operations in the loop that are not subject
to SLP and make sure they are vectorizable. */
ok = vect_analyze_loop_operations (loop_vinfo);
if (!ok)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad operation or unsupported loop bound.\n");
return false;
}
/* Decide whether to use a fully-masked loop for this vectorization
factor. */
LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
= (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo)
&& vect_verify_full_masking (loop_vinfo));
if (dump_enabled_p ())
{
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
dump_printf_loc (MSG_NOTE, vect_location,
"using a fully-masked loop.\n");
else
dump_printf_loc (MSG_NOTE, vect_location,
"not using a fully-masked loop.\n");
}
/* If epilog loop is required because of data accesses with gaps,
one additional iteration needs to be peeled. Check if there are
enough iterations for vectorization. */
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
&& LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
{
poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
tree scalar_niters = LOOP_VINFO_NITERSM1 (loop_vinfo);
if (known_lt (wi::to_widest (scalar_niters), vf))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"loop has no enough iterations to support"
" peeling for gaps.\n");
return false;
}
}
/* Check that the costings of the loop make vectorizing worthwhile. */
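/* vect_analyze_loop_costing returns 1 when vectorization looks
profitable, -1 when it can never be profitable or the estimated
iteration count is too small (the caller then retries, e.g. with SLP
forced off), and 0 when the known iteration count is below the
threshold. */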
res = vect_analyze_loop_costing (loop_vinfo);
if (res < 0)
goto again;
if (!res)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Loop costings not worthwhile.\n");
return false;
}
/* Decide whether we need to create an epilogue loop to handle
remaining scalar iterations. */
th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
unsigned HOST_WIDE_INT const_vf;
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
/* The main loop handles all iterations. */
LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
else if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) > 0)
{
if (!multiple_p (LOOP_VINFO_INT_NITERS (loop_vinfo)
- LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo),
LOOP_VINFO_VECT_FACTOR (loop_vinfo)))
LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
}
else if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
|| !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&const_vf)
|| ((tree_ctz (LOOP_VINFO_NITERS (loop_vinfo))
< (unsigned) exact_log2 (const_vf))
/* In case of versioning, check if the maximum number of
iterations is greater than th. If they are identical,
the epilogue is unnecessary. */
&& (!LOOP_REQUIRES_VERSIONING (loop_vinfo)
|| ((unsigned HOST_WIDE_INT) max_niter
> (th / const_vf) * const_vf))))
LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = true;
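/* For example, with const_vf = 4 (exact_log2 = 2), a niters expression
known to be a multiple of 4 has tree_ctz (niters) >= 2 and needs no
epilogue from this test, while niters = 6 has tree_ctz (niters) = 1 < 2,
so peeling for niters is required. */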
/* If an epilogue loop is required make sure we can create one. */
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo)
|| LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "epilog loop required\n");
if (!vect_can_advance_ivs_p (loop_vinfo)
|| !slpeel_can_duplicate_loop_p (LOOP_VINFO_LOOP (loop_vinfo),
single_exit (LOOP_VINFO_LOOP
(loop_vinfo))))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not vectorized: can't create required "
"epilog loop\n");
goto again;
}
}
/* During peeling, we need to check if number of loop iterations is
enough for both peeled prolog loop and vector loop. This check
can be merged along with threshold check of loop versioning, so
increase threshold for this case if necessary. */
if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
{
poly_uint64 niters_th = 0;
if (!vect_use_loop_mask_for_alignment_p (loop_vinfo))
{
/* Niters for peeled prolog loop. */
if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
{
struct data_reference *dr = LOOP_VINFO_UNALIGNED_DR (loop_vinfo);
tree vectype
= STMT_VINFO_VECTYPE (vinfo_for_stmt (DR_STMT (dr)));
niters_th += TYPE_VECTOR_SUBPARTS (vectype) - 1;
}
else
niters_th += LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
}
/* Niters for at least one iteration of vectorized loop. */
if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
niters_th += LOOP_VINFO_VECT_FACTOR (loop_vinfo);
/* One additional iteration because of peeling for gap. */
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
niters_th += 1;
LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = niters_th;
}
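/* For example, with VF = 4, two known prologue iterations peeled for
alignment and peeling for gaps, the versioning threshold is
2 + 4 + 1 = 7 iterations. */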
gcc_assert (known_eq (vectorization_factor,
LOOP_VINFO_VECT_FACTOR (loop_vinfo)));
/* Ok to vectorize! */
return true;
again:
/* Try again with SLP forced off, but if we didn't do any SLP there is
no point in re-trying. */
if (!slp)
return false;
/* If there are reduction chains re-trying will fail anyway. */
if (! LOOP_VINFO_REDUCTION_CHAINS (loop_vinfo).is_empty ())
return false;
/* Likewise if the grouped loads or stores in the SLP cannot be handled
via interleaving or lane instructions. */
slp_instance instance;
slp_tree node;
unsigned i, j;
FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
{
stmt_vec_info vinfo;
vinfo = vinfo_for_stmt
(SLP_TREE_SCALAR_STMTS (SLP_INSTANCE_TREE (instance))[0]);
if (! STMT_VINFO_GROUPED_ACCESS (vinfo))
continue;
vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
unsigned int size = STMT_VINFO_GROUP_SIZE (vinfo);
tree vectype = STMT_VINFO_VECTYPE (vinfo);
if (! vect_store_lanes_supported (vectype, size, false)
&& ! known_eq (TYPE_VECTOR_SUBPARTS (vectype), 1U)
&& ! vect_grouped_store_supported (vectype, size))
return false;
FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), j, node)
{
vinfo = vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
vinfo = vinfo_for_stmt (STMT_VINFO_GROUP_FIRST_ELEMENT (vinfo));
bool single_element_p = !STMT_VINFO_GROUP_NEXT_ELEMENT (vinfo);
size = STMT_VINFO_GROUP_SIZE (vinfo);
vectype = STMT_VINFO_VECTYPE (vinfo);
if (! vect_load_lanes_supported (vectype, size, false)
&& ! vect_grouped_load_supported (vectype, single_element_p,
size))
return false;
}
}
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"re-trying with SLP disabled\n");
/* Roll back state appropriately. No SLP this time. */
slp = false;
/* Restore the vectorization factor to what it was without SLP. */
LOOP_VINFO_VECT_FACTOR (loop_vinfo) = saved_vectorization_factor;
/* Free the SLP instances. */
FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), j, instance)
vect_free_slp_instance (instance);
LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
/* Reset SLP type to loop_vect on all stmts. */
for (i = 0; i < LOOP_VINFO_LOOP (loop_vinfo)->num_nodes; ++i)
{
basic_block bb = LOOP_VINFO_BBS (loop_vinfo)[i];
for (gimple_stmt_iterator si = gsi_start_phis (bb);
!gsi_end_p (si); gsi_next (&si))
{
stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si));
STMT_SLP_TYPE (stmt_info) = loop_vect;
}
for (gimple_stmt_iterator si = gsi_start_bb (bb);
!gsi_end_p (si); gsi_next (&si))
{
stmt_vec_info stmt_info = vinfo_for_stmt (gsi_stmt (si));
STMT_SLP_TYPE (stmt_info) = loop_vect;
if (STMT_VINFO_IN_PATTERN_P (stmt_info))
{
stmt_info = vinfo_for_stmt (STMT_VINFO_RELATED_STMT (stmt_info));
STMT_SLP_TYPE (stmt_info) = loop_vect;
for (gimple_stmt_iterator pi
= gsi_start (STMT_VINFO_PATTERN_DEF_SEQ (stmt_info));
!gsi_end_p (pi); gsi_next (&pi))
{
gimple *pstmt = gsi_stmt (pi);
STMT_SLP_TYPE (vinfo_for_stmt (pstmt)) = loop_vect;
}
}
}
}
/* Free optimized alias test DDRS. */
LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).truncate (0);
LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).release ();
LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).release ();
/* Reset target cost data. */
destroy_cost_data (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo));
LOOP_VINFO_TARGET_COST_DATA (loop_vinfo)
= init_cost (LOOP_VINFO_LOOP (loop_vinfo));
/* Reset accumulated rgroup information. */
release_vec_loop_masks (&LOOP_VINFO_MASKS (loop_vinfo));
/* Reset assorted flags. */
LOOP_VINFO_PEELING_FOR_NITER (loop_vinfo) = false;
LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) = false;
LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo) = 0;
LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo) = 0;
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = saved_can_fully_mask_p;
goto start_over;
}
/* Function vect_analyze_loop.
Apply a set of analyses on LOOP, and create a loop_vec_info struct
for it. The different analyses will record information in the
loop_vec_info struct. If ORIG_LOOP_VINFO is not NULL, an epilogue must
be vectorized. */
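/* The analysis below is retried for each vector size the target
supports (targetm.vectorize.autovectorize_vector_sizes) until one
succeeds or the failure is fatal. */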
loop_vec_info
vect_analyze_loop (struct loop *loop, loop_vec_info orig_loop_vinfo)
{
loop_vec_info loop_vinfo;
auto_vector_sizes vector_sizes;
/* Autodetect first vector size we try. */
current_vector_size = 0;
targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
unsigned int next_size = 0;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"===== analyze_loop_nest =====\n");
if (loop_outer (loop)
&& loop_vec_info_for_loop (loop_outer (loop))
&& LOOP_VINFO_VECTORIZABLE_P (loop_vec_info_for_loop (loop_outer (loop))))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"outer-loop already vectorized.\n");
return NULL;
}
poly_uint64 autodetected_vector_size = 0;
while (1)
{
/* Check the CFG characteristics of the loop (nesting, entry/exit). */
loop_vinfo = vect_analyze_loop_form (loop);
if (!loop_vinfo)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"bad loop form.\n");
return NULL;
}
bool fatal = false;
if (orig_loop_vinfo)
LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo) = orig_loop_vinfo;
if (vect_analyze_loop_2 (loop_vinfo, fatal))
{
LOOP_VINFO_VECTORIZABLE_P (loop_vinfo) = 1;
return loop_vinfo;
}
delete loop_vinfo;
if (next_size == 0)
autodetected_vector_size = current_vector_size;
if (next_size < vector_sizes.length ()
&& known_eq (vector_sizes[next_size], autodetected_vector_size))
next_size += 1;
if (fatal
|| next_size == vector_sizes.length ()
|| known_eq (current_vector_size, 0U))
return NULL;
/* Try the next biggest vector size. */
current_vector_size = vector_sizes[next_size++];
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"***** Re-trying analysis with "
"vector size ");
dump_dec (MSG_NOTE, current_vector_size);
dump_printf (MSG_NOTE, "\n");
}
}
}
/* Return true if there is an in-order reduction function for CODE, storing
it in *REDUC_FN if so. */
static bool
fold_left_reduction_fn (tree_code code, internal_fn *reduc_fn)
{
switch (code)
{
case PLUS_EXPR:
*reduc_fn = IFN_FOLD_LEFT_PLUS;
return true;
default:
return false;
}
}
/* Function reduction_fn_for_scalar_code
Input:
CODE - tree_code of a reduction operation.
Output:
REDUC_FN - the corresponding internal function to be used to reduce the
vector of partial results into a single scalar result, or IFN_LAST
if the operation is a supported reduction operation, but does not have
such an internal function.
Return FALSE if CODE currently cannot be vectorized as a reduction. */
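/* For example, a loop computing s += a[i] has CODE == PLUS_EXPR and maps
to IFN_REDUC_PLUS, which the epilogue uses to reduce the vector of
partial sums to a single scalar; MULT_EXPR is a supported reduction but
has no such internal function, so *REDUC_FN is set to IFN_LAST. */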
static bool
reduction_fn_for_scalar_code (enum tree_code code, internal_fn *reduc_fn)
{
switch (code)
{
case MAX_EXPR:
*reduc_fn = IFN_REDUC_MAX;
return true;
case MIN_EXPR:
*reduc_fn = IFN_REDUC_MIN;
return true;
case PLUS_EXPR:
*reduc_fn = IFN_REDUC_PLUS;
return true;
case BIT_AND_EXPR:
*reduc_fn = IFN_REDUC_AND;
return true;
case BIT_IOR_EXPR:
*reduc_fn = IFN_REDUC_IOR;
return true;
case BIT_XOR_EXPR:
*reduc_fn = IFN_REDUC_XOR;
return true;
case MULT_EXPR:
case MINUS_EXPR:
*reduc_fn = IFN_LAST;
return true;
default:
return false;
}
}
/* If there is a neutral value X such that SLP reduction NODE would not
be affected by the introduction of additional X elements, return that X,
otherwise return null. CODE is the code of the reduction. REDUC_CHAIN
is true if the SLP statements perform a single reduction, false if each
statement performs an independent reduction. */
static tree
neutral_op_for_slp_reduction (slp_tree slp_node, tree_code code,
bool reduc_chain)
{
vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
gimple *stmt = stmts[0];
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
tree vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
tree scalar_type = TREE_TYPE (vector_type);
struct loop *loop = gimple_bb (stmt)->loop_father;
gcc_assert (loop);
switch (code)
{
case WIDEN_SUM_EXPR:
case DOT_PROD_EXPR:
case SAD_EXPR:
case PLUS_EXPR:
case MINUS_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
return build_zero_cst (scalar_type);
case MULT_EXPR:
return build_one_cst (scalar_type);
case BIT_AND_EXPR:
return build_all_ones_cst (scalar_type);
case MAX_EXPR:
case MIN_EXPR:
/* For MIN/MAX the initial values are neutral. A reduction chain
has only a single initial value, so that value is neutral for
all statements. */
if (reduc_chain)
return PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop));
return NULL_TREE;
default:
return NULL_TREE;
}
}
/* Error reporting helper for vect_is_simple_reduction below. GIMPLE statement
STMT is printed with a message MSG. */
static void
report_vect_op (dump_flags_t msg_type, gimple *stmt, const char *msg)
{
dump_printf_loc (msg_type, vect_location, "%s", msg);
dump_gimple_stmt (msg_type, TDF_SLIM, stmt, 0);
}
/* Detect SLP reduction of the form:
#a1 = phi <a5, a0>
a2 = operation (a1)
a3 = operation (a2)
a4 = operation (a3)
a5 = operation (a4)
#a = phi <a5>
PHI is the reduction phi node (#a1 = phi <a5, a0> above)
FIRST_STMT is the first reduction stmt in the chain
(a2 = operation (a1)).
Return TRUE if a reduction chain was detected. */
static bool
vect_is_slp_reduction (loop_vec_info loop_info, gimple *phi,
gimple *first_stmt)
{
struct loop *loop = (gimple_bb (phi))->loop_father;
struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
enum tree_code code;
gimple *current_stmt = NULL, *loop_use_stmt = NULL, *first, *next_stmt;
stmt_vec_info use_stmt_info, current_stmt_info;
tree lhs;
imm_use_iterator imm_iter;
use_operand_p use_p;
int nloop_uses, size = 0, n_out_of_loop_uses;
bool found = false;
if (loop != vect_loop)
return false;
lhs = PHI_RESULT (phi);
code = gimple_assign_rhs_code (first_stmt);
while (1)
{
nloop_uses = 0;
n_out_of_loop_uses = 0;
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
{
gimple *use_stmt = USE_STMT (use_p);
if (is_gimple_debug (use_stmt))
continue;
/* Check if we got back to the reduction phi. */
if (use_stmt == phi)
{
loop_use_stmt = use_stmt;
found = true;
break;
}
if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
{
loop_use_stmt = use_stmt;
nloop_uses++;
}
else
n_out_of_loop_uses++;
/* There can be either a single use in the loop or two uses in
phi nodes. */
if (nloop_uses > 1 || (n_out_of_loop_uses && nloop_uses))
return false;
}
if (found)
break;
/* We reached a statement with no loop uses. */
if (nloop_uses == 0)
return false;
/* This is a loop exit phi, and we haven't reached the reduction phi. */
if (gimple_code (loop_use_stmt) == GIMPLE_PHI)
return false;
if (!is_gimple_assign (loop_use_stmt)
|| code != gimple_assign_rhs_code (loop_use_stmt)
|| !flow_bb_inside_loop_p (loop, gimple_bb (loop_use_stmt)))
return false;
/* Insert USE_STMT into reduction chain. */
use_stmt_info = vinfo_for_stmt (loop_use_stmt);
if (current_stmt)
{
current_stmt_info = vinfo_for_stmt (current_stmt);
GROUP_NEXT_ELEMENT (current_stmt_info) = loop_use_stmt;
GROUP_FIRST_ELEMENT (use_stmt_info)
= GROUP_FIRST_ELEMENT (current_stmt_info);
}
else
GROUP_FIRST_ELEMENT (use_stmt_info) = loop_use_stmt;
lhs = gimple_assign_lhs (loop_use_stmt);
current_stmt = loop_use_stmt;
size++;
}
if (!found || loop_use_stmt != phi || size < 2)
return false;
/* Swap the operands, if needed, to make the reduction operand be the second
operand. */
lhs = PHI_RESULT (phi);
next_stmt = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
while (next_stmt)
{
if (gimple_assign_rhs2 (next_stmt) == lhs)
{
tree op = gimple_assign_rhs1 (next_stmt);
gimple *def_stmt = NULL;
if (TREE_CODE (op) == SSA_NAME)
def_stmt = SSA_NAME_DEF_STMT (op);
/* Check that the other def is either defined in the loop
("vect_internal_def"), or it's an induction (defined by a
loop-header phi-node). */
if (def_stmt
&& gimple_bb (def_stmt)
&& flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
&& (is_gimple_assign (def_stmt)
|| is_gimple_call (def_stmt)
|| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_induction_def
|| (gimple_code (def_stmt) == GIMPLE_PHI
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def_stmt)))))
{
lhs = gimple_assign_lhs (next_stmt);
next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
continue;
}
return false;
}
else
{
tree op = gimple_assign_rhs2 (next_stmt);
gimple *def_stmt = NULL;
if (TREE_CODE (op) == SSA_NAME)
def_stmt = SSA_NAME_DEF_STMT (op);
/* Check that the other def is either defined in the loop
("vect_internal_def"), or it's an induction (defined by a
loop-header phi-node). */
if (def_stmt
&& gimple_bb (def_stmt)
&& flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
&& (is_gimple_assign (def_stmt)
|| is_gimple_call (def_stmt)
|| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_induction_def
|| (gimple_code (def_stmt) == GIMPLE_PHI
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def_stmt)))))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "swapping oprnds: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, next_stmt, 0);
}
swap_ssa_operands (next_stmt,
gimple_assign_rhs1_ptr (next_stmt),
gimple_assign_rhs2_ptr (next_stmt));
update_stmt (next_stmt);
if (CONSTANT_CLASS_P (gimple_assign_rhs1 (next_stmt)))
LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
}
else
return false;
}
lhs = gimple_assign_lhs (next_stmt);
next_stmt = GROUP_NEXT_ELEMENT (vinfo_for_stmt (next_stmt));
}
/* Save the chain for further analysis in SLP detection. */
first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (current_stmt));
LOOP_VINFO_REDUCTION_CHAINS (loop_info).safe_push (first);
GROUP_SIZE (vinfo_for_stmt (first)) = size;
return true;
}
/* Return true if we need an in-order reduction for operation CODE
on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
overflow must wrap. */
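/* For example, a float PLUS_EXPR reduction must be computed in order
unless -fassociative-math is in effect, since reordering the additions
can change the rounded result; MIN_EXPR and MAX_EXPR are exempt because
they are insensitive to ordering. Likewise a signed integer reduction
whose overflow can trap cannot be safely reassociated. */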
static bool
needs_fold_left_reduction_p (tree type, tree_code code,
bool need_wrapping_integral_overflow)
{
/* CHECKME: check for !flag_finite_math_only too? */
if (SCALAR_FLOAT_TYPE_P (type))
switch (code)
{
case MIN_EXPR:
case MAX_EXPR:
return false;
default:
return !flag_associative_math;
}
if (INTEGRAL_TYPE_P (type))
{
if (!operation_no_trapping_overflow (type, code))
return true;
if (need_wrapping_integral_overflow
&& !TYPE_OVERFLOW_WRAPS (type)
&& operation_can_overflow (code))
return true;
return false;
}
if (SAT_FIXED_POINT_TYPE_P (type))
return true;
return false;
}
/* Return true if the reduction PHI in LOOP with latch arg LOOP_ARG and
reduction operation CODE has a handled computation expression. */
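/* For example, for
a1 = PHI <a0, a4>
a2 = a1 + x;
a3 = a2 + y;
a4 = a3 + z;
the path from the latch argument a4 back to a1 consists only of
single-use PLUS_EXPR statements, so the path is accepted. */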
bool
check_reduction_path (location_t loc, loop_p loop, gphi *phi, tree loop_arg,
enum tree_code code)
{
auto_vec<std::pair<ssa_op_iter, use_operand_p> > path;
auto_bitmap visited;
tree lookfor = PHI_RESULT (phi);
ssa_op_iter curri;
use_operand_p curr = op_iter_init_phiuse (&curri, phi, SSA_OP_USE);
while (USE_FROM_PTR (curr) != loop_arg)
curr = op_iter_next_use (&curri);
curri.i = curri.numops;
do
{
path.safe_push (std::make_pair (curri, curr));
tree use = USE_FROM_PTR (curr);
if (use == lookfor)
break;
gimple *def = SSA_NAME_DEF_STMT (use);
if (gimple_nop_p (def)
|| ! flow_bb_inside_loop_p (loop, gimple_bb (def)))
{
pop:
do
{
std::pair<ssa_op_iter, use_operand_p> x = path.pop ();
curri = x.first;
curr = x.second;
do
curr = op_iter_next_use (&curri);
/* Skip already visited or non-SSA operands (from iterating
over PHI args). */
while (curr != NULL_USE_OPERAND_P
&& (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
|| ! bitmap_set_bit (visited,
SSA_NAME_VERSION
(USE_FROM_PTR (curr)))));
}
while (curr == NULL_USE_OPERAND_P && ! path.is_empty ());
if (curr == NULL_USE_OPERAND_P)
break;
}
else
{
if (gimple_code (def) == GIMPLE_PHI)
curr = op_iter_init_phiuse (&curri, as_a <gphi *>(def), SSA_OP_USE);
else
curr = op_iter_init_use (&curri, def, SSA_OP_USE);
while (curr != NULL_USE_OPERAND_P
&& (TREE_CODE (USE_FROM_PTR (curr)) != SSA_NAME
|| ! bitmap_set_bit (visited,
SSA_NAME_VERSION
(USE_FROM_PTR (curr)))))
curr = op_iter_next_use (&curri);
if (curr == NULL_USE_OPERAND_P)
goto pop;
}
}
while (1);
if (dump_file && (dump_flags & TDF_DETAILS))
{
dump_printf_loc (MSG_NOTE, loc, "reduction path: ");
unsigned i;
std::pair<ssa_op_iter, use_operand_p> *x;
FOR_EACH_VEC_ELT (path, i, x)
{
dump_generic_expr (MSG_NOTE, TDF_SLIM, USE_FROM_PTR (x->second));
dump_printf (MSG_NOTE, " ");
}
dump_printf (MSG_NOTE, "\n");
}
/* Check whether the reduction path detected is valid. */
bool fail = path.length () == 0;
bool neg = false;
for (unsigned i = 1; i < path.length (); ++i)
{
gimple *use_stmt = USE_STMT (path[i].second);
tree op = USE_FROM_PTR (path[i].second);
if (! has_single_use (op)
|| ! is_gimple_assign (use_stmt))
{
fail = true;
break;
}
if (gimple_assign_rhs_code (use_stmt) != code)
{
if (code == PLUS_EXPR
&& gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
{
/* Track whether we negate the reduction value each iteration. */
if (gimple_assign_rhs2 (use_stmt) == op)
neg = ! neg;
}
else
{
fail = true;
break;
}
}
}
return ! fail && ! neg;
}
/* Function vect_is_simple_reduction
(1) Detect a cross-iteration def-use cycle that represents a simple
reduction computation. We look for the following pattern:
loop_header:
a1 = phi < a0, a2 >
a3 = ...
a2 = operation (a3, a1)
or
a3 = ...
loop_header:
a1 = phi < a0, a2 >
a2 = operation (a3, a1)
such that:
1. operation is commutative and associative and it is safe to
change the order of the computation
2. no uses for a2 in the loop (a2 is used out of the loop)
3. no uses of a1 in the loop besides the reduction operation
4. no uses of a1 outside the loop.
Conditions 1,4 are tested here.
Conditions 2,3 are tested in vect_mark_stmts_to_be_vectorized.
(2) Detect a cross-iteration def-use cycle in nested loops, i.e.,
nested cycles.
(3) Detect cycles of phi nodes in outer-loop vectorization, i.e., double
reductions:
a1 = phi < a0, a2 >
inner loop (def of a3)
a2 = phi < a3 >
(4) Detect condition expressions, i.e.:
for (int i = 0; i < N; i++)
if (a[i] < val)
ret_val = a[i];
*/
static gimple *
vect_is_simple_reduction (loop_vec_info loop_info, gimple *phi,
bool *double_reduc,
bool need_wrapping_integral_overflow,
enum vect_reduction_type *v_reduc_type)
{
struct loop *loop = (gimple_bb (phi))->loop_father;
struct loop *vect_loop = LOOP_VINFO_LOOP (loop_info);
gimple *def_stmt, *def1 = NULL, *def2 = NULL, *phi_use_stmt = NULL;
enum tree_code orig_code, code;
tree op1, op2, op3 = NULL_TREE, op4 = NULL_TREE;
tree type;
int nloop_uses;
tree name;
imm_use_iterator imm_iter;
use_operand_p use_p;
bool phi_def;
*double_reduc = false;
*v_reduc_type = TREE_CODE_REDUCTION;
tree phi_name = PHI_RESULT (phi);
/* ??? If there are no uses of the PHI result the inner loop reduction
won't be detected as possibly double-reduction by vectorizable_reduction
because that tries to walk the PHI arg from the preheader edge which
can be constant. See PR60382. */
if (has_zero_uses (phi_name))
return NULL;
nloop_uses = 0;
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, phi_name)
{
gimple *use_stmt = USE_STMT (use_p);
if (is_gimple_debug (use_stmt))
continue;
if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"intermediate value used outside loop.\n");
return NULL;
}
nloop_uses++;
if (nloop_uses > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction value used in loop.\n");
return NULL;
}
phi_use_stmt = use_stmt;
}
edge latch_e = loop_latch_edge (loop);
tree loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
if (TREE_CODE (loop_arg) != SSA_NAME)
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction: not ssa_name: ");
dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_SLIM, loop_arg);
dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
}
return NULL;
}
def_stmt = SSA_NAME_DEF_STMT (loop_arg);
if (is_gimple_assign (def_stmt))
{
name = gimple_assign_lhs (def_stmt);
phi_def = false;
}
else if (gimple_code (def_stmt) == GIMPLE_PHI)
{
name = PHI_RESULT (def_stmt);
phi_def = true;
}
else
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction: unhandled reduction operation: ");
dump_gimple_stmt (MSG_MISSED_OPTIMIZATION, TDF_SLIM, def_stmt, 0);
}
return NULL;
}
if (! flow_bb_inside_loop_p (loop, gimple_bb (def_stmt)))
return NULL;
nloop_uses = 0;
auto_vec<gphi *, 3> lcphis;
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, name)
{
gimple *use_stmt = USE_STMT (use_p);
if (is_gimple_debug (use_stmt))
continue;
if (flow_bb_inside_loop_p (loop, gimple_bb (use_stmt)))
nloop_uses++;
else
/* We can have more than one loop-closed PHI. */
lcphis.safe_push (as_a <gphi *> (use_stmt));
if (nloop_uses > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction used in loop.\n");
return NULL;
}
}
/* If DEF_STMT is a phi node itself, we expect it to have a single argument
defined in the inner loop. */
if (phi_def)
{
op1 = PHI_ARG_DEF (def_stmt, 0);
if (gimple_phi_num_args (def_stmt) != 1
|| TREE_CODE (op1) != SSA_NAME)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported phi node definition.\n");
return NULL;
}
def1 = SSA_NAME_DEF_STMT (op1);
if (gimple_bb (def1)
&& flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
&& loop->inner
&& flow_bb_inside_loop_p (loop->inner, gimple_bb (def1))
&& is_gimple_assign (def1)
&& flow_bb_inside_loop_p (loop->inner, gimple_bb (phi_use_stmt)))
{
if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt,
"detected double reduction: ");
*double_reduc = true;
return def_stmt;
}
return NULL;
}
/* If we are vectorizing an inner reduction, we execute it in the
original order only when we are not dealing with a double
reduction. */
bool check_reduction = true;
if (flow_loop_nested_p (vect_loop, loop))
{
gphi *lcphi;
unsigned i;
check_reduction = false;
FOR_EACH_VEC_ELT (lcphis, i, lcphi)
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, gimple_phi_result (lcphi))
{
gimple *use_stmt = USE_STMT (use_p);
if (is_gimple_debug (use_stmt))
continue;
if (! flow_bb_inside_loop_p (vect_loop, gimple_bb (use_stmt)))
check_reduction = true;
}
}
bool nested_in_vect_loop = flow_loop_nested_p (vect_loop, loop);
code = orig_code = gimple_assign_rhs_code (def_stmt);
/* We can handle "res -= x[i]", which is non-associative by
simply rewriting this into "res += -x[i]". Avoid changing
gimple instruction for the first simple tests and only do this
if we're allowed to change code at all. */
if (code == MINUS_EXPR && gimple_assign_rhs2 (def_stmt) != phi_name)
code = PLUS_EXPR;
if (code == COND_EXPR)
{
if (! nested_in_vect_loop)
*v_reduc_type = COND_REDUCTION;
op3 = gimple_assign_rhs1 (def_stmt);
if (COMPARISON_CLASS_P (op3))
{
op4 = TREE_OPERAND (op3, 1);
op3 = TREE_OPERAND (op3, 0);
}
if (op3 == phi_name || op4 == phi_name)
{
if (dump_enabled_p ())
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: condition depends on previous"
" iteration: ");
return NULL;
}
op1 = gimple_assign_rhs2 (def_stmt);
op2 = gimple_assign_rhs3 (def_stmt);
}
else if (!commutative_tree_code (code) || !associative_tree_code (code))
{
if (dump_enabled_p ())
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: not commutative/associative: ");
return NULL;
}
else if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
{
op1 = gimple_assign_rhs1 (def_stmt);
op2 = gimple_assign_rhs2 (def_stmt);
}
else
{
if (dump_enabled_p ())
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: not handled operation: ");
return NULL;
}
if (TREE_CODE (op1) != SSA_NAME && TREE_CODE (op2) != SSA_NAME)
{
if (dump_enabled_p ())
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: both uses not ssa_names: ");
return NULL;
}
type = TREE_TYPE (gimple_assign_lhs (def_stmt));
if ((TREE_CODE (op1) == SSA_NAME
&& !types_compatible_p (type,TREE_TYPE (op1)))
|| (TREE_CODE (op2) == SSA_NAME
&& !types_compatible_p (type, TREE_TYPE (op2)))
|| (op3 && TREE_CODE (op3) == SSA_NAME
&& !types_compatible_p (type, TREE_TYPE (op3)))
|| (op4 && TREE_CODE (op4) == SSA_NAME
&& !types_compatible_p (type, TREE_TYPE (op4))))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"reduction: multiple types: operation type: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM, type);
dump_printf (MSG_NOTE, ", operands types: ");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
TREE_TYPE (op1));
dump_printf (MSG_NOTE, ",");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
TREE_TYPE (op2));
if (op3)
{
dump_printf (MSG_NOTE, ",");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
TREE_TYPE (op3));
}
if (op4)
{
dump_printf (MSG_NOTE, ",");
dump_generic_expr (MSG_NOTE, TDF_SLIM,
TREE_TYPE (op4));
}
dump_printf (MSG_NOTE, "\n");
}
return NULL;
}
/* Check whether it's ok to change the order of the computation.
Generally, when vectorizing a reduction we change the order of the
computation. This may change the behavior of the program in some
cases, so we need to check that this is ok. One exception is when
vectorizing an outer-loop: the inner-loop is executed sequentially,
and therefore vectorizing reductions in the inner-loop during
outer-loop vectorization is safe. */
if (check_reduction
&& *v_reduc_type == TREE_CODE_REDUCTION
&& needs_fold_left_reduction_p (type, code,
need_wrapping_integral_overflow))
*v_reduc_type = FOLD_LEFT_REDUCTION;
/* Reduction is safe. We're dealing with one of the following:
1) integer arithmetic and no trapv
2) floating point arithmetic, and special flags permit this optimization
3) nested cycle (i.e., outer loop vectorization). */
if (TREE_CODE (op1) == SSA_NAME)
def1 = SSA_NAME_DEF_STMT (op1);
if (TREE_CODE (op2) == SSA_NAME)
def2 = SSA_NAME_DEF_STMT (op2);
if (code != COND_EXPR
&& ((!def1 || gimple_nop_p (def1)) && (!def2 || gimple_nop_p (def2))))
{
if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt, "reduction: no defs for operands: ");
return NULL;
}
/* Check that one def is the reduction def, defined by PHI,
the other def is either defined in the loop ("vect_internal_def"),
or it's an induction (defined by a loop-header phi-node). */
if (def2 && def2 == phi
&& (code == COND_EXPR
|| !def1 || gimple_nop_p (def1)
|| !flow_bb_inside_loop_p (loop, gimple_bb (def1))
|| (def1 && flow_bb_inside_loop_p (loop, gimple_bb (def1))
&& (is_gimple_assign (def1)
|| is_gimple_call (def1)
|| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
== vect_induction_def
|| (gimple_code (def1) == GIMPLE_PHI
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def1))
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def1)))))))
{
if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
return def_stmt;
}
if (def1 && def1 == phi
&& (code == COND_EXPR
|| !def2 || gimple_nop_p (def2)
|| !flow_bb_inside_loop_p (loop, gimple_bb (def2))
|| (def2 && flow_bb_inside_loop_p (loop, gimple_bb (def2))
&& (is_gimple_assign (def2)
|| is_gimple_call (def2)
|| STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
== vect_induction_def
|| (gimple_code (def2) == GIMPLE_PHI
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def2))
== vect_internal_def
&& !is_loop_header_bb_p (gimple_bb (def2)))))))
{
if (! nested_in_vect_loop && orig_code != MINUS_EXPR)
{
/* Check if we can swap operands (just for simplicity - so that
the rest of the code can assume that the reduction variable
is always the last (second) argument). */
if (code == COND_EXPR)
{
/* Swap cond_expr by inverting the condition. */
tree cond_expr = gimple_assign_rhs1 (def_stmt);
enum tree_code invert_code = ERROR_MARK;
enum tree_code cond_code = TREE_CODE (cond_expr);
if (TREE_CODE_CLASS (cond_code) == tcc_comparison)
{
bool honor_nans = HONOR_NANS (TREE_OPERAND (cond_expr, 0));
invert_code = invert_tree_comparison (cond_code, honor_nans);
}
if (invert_code != ERROR_MARK)
{
TREE_SET_CODE (cond_expr, invert_code);
swap_ssa_operands (def_stmt,
gimple_assign_rhs2_ptr (def_stmt),
gimple_assign_rhs3_ptr (def_stmt));
}
else
{
if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt,
"detected reduction: cannot swap operands "
"for cond_expr");
return NULL;
}
}
else
swap_ssa_operands (def_stmt, gimple_assign_rhs1_ptr (def_stmt),
gimple_assign_rhs2_ptr (def_stmt));
if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt,
"detected reduction: need to swap operands: ");
if (CONSTANT_CLASS_P (gimple_assign_rhs1 (def_stmt)))
LOOP_VINFO_OPERANDS_SWAPPED (loop_info) = true;
}
else
{
if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt, "detected reduction: ");
}
return def_stmt;
}
/* Try to find SLP reduction chain. */
if (! nested_in_vect_loop
&& code != COND_EXPR
&& orig_code != MINUS_EXPR
&& vect_is_slp_reduction (loop_info, phi, def_stmt))
{
if (dump_enabled_p ())
report_vect_op (MSG_NOTE, def_stmt,
"reduction: detected reduction chain: ");
return def_stmt;
}
/* Dissolve any group possibly half-built by vect_is_slp_reduction. */
gimple *first = GROUP_FIRST_ELEMENT (vinfo_for_stmt (def_stmt));
while (first)
{
gimple *next = GROUP_NEXT_ELEMENT (vinfo_for_stmt (first));
GROUP_FIRST_ELEMENT (vinfo_for_stmt (first)) = NULL;
GROUP_NEXT_ELEMENT (vinfo_for_stmt (first)) = NULL;
first = next;
}
/* Look for the expression computing loop_arg from loop PHI result. */
if (check_reduction_path (vect_location, loop, as_a <gphi *> (phi), loop_arg,
code))
return def_stmt;
if (dump_enabled_p ())
{
report_vect_op (MSG_MISSED_OPTIMIZATION, def_stmt,
"reduction: unknown pattern: ");
}
return NULL;
}
/* Wrapper around vect_is_simple_reduction, which will modify code
in-place if it enables detection of more reductions. Arguments are
as for vect_is_simple_reduction. */
gimple *
vect_force_simple_reduction (loop_vec_info loop_info, gimple *phi,
bool *double_reduc,
bool need_wrapping_integral_overflow)
{
enum vect_reduction_type v_reduc_type;
gimple *def = vect_is_simple_reduction (loop_info, phi, double_reduc,
need_wrapping_integral_overflow,
&v_reduc_type);
if (def)
{
stmt_vec_info reduc_def_info = vinfo_for_stmt (phi);
STMT_VINFO_REDUC_TYPE (reduc_def_info) = v_reduc_type;
STMT_VINFO_REDUC_DEF (reduc_def_info) = def;
reduc_def_info = vinfo_for_stmt (def);
STMT_VINFO_REDUC_TYPE (reduc_def_info) = v_reduc_type;
STMT_VINFO_REDUC_DEF (reduc_def_info) = phi;
}
return def;
}
/* Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times. */
int
vect_get_known_peeling_cost (loop_vec_info loop_vinfo, int peel_iters_prologue,
int *peel_iters_epilogue,
stmt_vector_for_cost *scalar_cost_vec,
stmt_vector_for_cost *prologue_cost_vec,
stmt_vector_for_cost *epilogue_cost_vec)
{
int retval = 0;
int assumed_vf = vect_vf_for_cost (loop_vinfo);
if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
*peel_iters_epilogue = assumed_vf / 2;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"cost model: epilogue peel iters set to vf/2 "
"because loop iterations are unknown .\n");
/* If peeled iterations are known but number of scalar loop
iterations are unknown, count a taken branch per peeled loop. */
retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
NULL, 0, vect_prologue);
retval = record_stmt_cost (prologue_cost_vec, 1, cond_branch_taken,
NULL, 0, vect_epilogue);
}
else
{
int niters = LOOP_VINFO_INT_NITERS (loop_vinfo);
peel_iters_prologue = niters < peel_iters_prologue ?
niters : peel_iters_prologue;
*peel_iters_epilogue = (niters - peel_iters_prologue) % assumed_vf;
/* If we need to peel for gaps but the computed epilogue peel count is
zero, we have to peel VF iterations. */
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) && !*peel_iters_epilogue)
*peel_iters_epilogue = assumed_vf;
}
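/* For example, with niters = 10, assumed_vf = 4 and
peel_iters_prologue = 3, the epilogue handles (10 - 3) % 4 = 3
iterations. */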
stmt_info_for_cost *si;
int j;
if (peel_iters_prologue)
FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
{
stmt_vec_info stmt_info
= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
retval += record_stmt_cost (prologue_cost_vec,
si->count * peel_iters_prologue,
si->kind, stmt_info, si->misalign,
vect_prologue);
}
if (*peel_iters_epilogue)
FOR_EACH_VEC_ELT (*scalar_cost_vec, j, si)
{
stmt_vec_info stmt_info
= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
retval += record_stmt_cost (epilogue_cost_vec,
si->count * *peel_iters_epilogue,
si->kind, stmt_info, si->misalign,
vect_epilogue);
}
return retval;
}
/* Function vect_estimate_min_profitable_iters
Return the number of iterations required for the vector version of the
loop to be profitable relative to the cost of the scalar version of the
loop.
*RET_MIN_PROFITABLE_NITERS is a cost model profitability threshold
of iterations for vectorization. A value of -1 means loop vectorization
is not profitable. This returned value may be used for a dynamic
profitability check.
*RET_MIN_PROFITABLE_ESTIMATE is a profitability threshold to be used
for static check against estimated number of iterations. */
static void
vect_estimate_min_profitable_iters (loop_vec_info loop_vinfo,
int *ret_min_profitable_niters,
int *ret_min_profitable_estimate)
{
int min_profitable_iters;
int min_profitable_estimate;
int peel_iters_prologue;
int peel_iters_epilogue;
unsigned vec_inside_cost = 0;
int vec_outside_cost = 0;
unsigned vec_prologue_cost = 0;
unsigned vec_epilogue_cost = 0;
int scalar_single_iter_cost = 0;
int scalar_outside_cost = 0;
int assumed_vf = vect_vf_for_cost (loop_vinfo);
int npeel = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
/* Cost model disabled. */
if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
{
dump_printf_loc (MSG_NOTE, vect_location, "cost model disabled.\n");
*ret_min_profitable_niters = 0;
*ret_min_profitable_estimate = 0;
return;
}
/* Requires loop versioning tests to handle misalignment. */
if (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
{
/* FIXME: Make cost depend on complexity of individual check. */
unsigned len = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ();
(void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
vect_prologue);
dump_printf (MSG_NOTE,
"cost model: Adding cost of checks for loop "
"versioning to treat misalignment.\n");
}
/* Requires loop versioning with alias checks. */
if (LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo))
{
/* FIXME: Make cost depend on complexity of individual check. */
unsigned len = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo).length ();
(void) add_stmt_cost (target_cost_data, len, vector_stmt, NULL, 0,
vect_prologue);
len = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).length ();
if (len)
/* Count LEN - 1 ANDs and LEN comparisons. */
(void) add_stmt_cost (target_cost_data, len * 2 - 1, scalar_stmt,
NULL, 0, vect_prologue);
len = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).length ();
if (len)
{
/* Count LEN - 1 ANDs and LEN comparisons. */
unsigned int nstmts = len * 2 - 1;
/* +1 for each bias that needs adding. */
for (unsigned int i = 0; i < len; ++i)
if (!LOOP_VINFO_LOWER_BOUNDS (loop_vinfo)[i].unsigned_p)
nstmts += 1;
(void) add_stmt_cost (target_cost_data, nstmts, scalar_stmt,
NULL, 0, vect_prologue);
}
dump_printf (MSG_NOTE,
"cost model: Adding cost of checks for loop "
"versioning aliasing.\n");
}
/* Requires loop versioning with niter checks. */
if (LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo))
{
/* FIXME: Make cost depend on complexity of individual check. */
(void) add_stmt_cost (target_cost_data, 1, vector_stmt, NULL, 0,
vect_prologue);
dump_printf (MSG_NOTE,
"cost model: Adding cost of checks for loop "
"versioning niters.\n");
}
if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
(void) add_stmt_cost (target_cost_data, 1, cond_branch_taken, NULL, 0,
vect_prologue);
/* Count statements in the scalar loop, using this as the scalar cost
for a single iteration for now.
TODO: Add outer loop support.
TODO: Consider assigning different costs to different scalar
statements. */
scalar_single_iter_cost
= LOOP_VINFO_SINGLE_SCALAR_ITERATION_COST (loop_vinfo);
/* Add additional cost for the peeled instructions in prologue and epilogue
loop. (For fully-masked loops there will be no peeling.)
FORNOW: If we don't know the value of peel_iters for prologue or epilogue
at compile-time - we assume it's vf/2 (the worst would be vf-1).
TODO: Build an expression that represents peel_iters for prologue and
epilogue to be used in a run-time test. */
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
{
peel_iters_prologue = 0;
peel_iters_epilogue = 0;
if (LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo))
{
/* We need to peel exactly one iteration. */
peel_iters_epilogue += 1;
stmt_info_for_cost *si;
int j;
FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
j, si)
{
struct _stmt_vec_info *stmt_info
= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
(void) add_stmt_cost (target_cost_data, si->count,
si->kind, stmt_info, si->misalign,
vect_epilogue);
}
}
}
else if (npeel < 0)
{
peel_iters_prologue = assumed_vf / 2;
dump_printf (MSG_NOTE, "cost model: "
"prologue peel iters set to vf/2.\n");
/* If peeling for alignment is unknown, the loop bound of the main loop
becomes unknown. */
peel_iters_epilogue = assumed_vf / 2;
dump_printf (MSG_NOTE, "cost model: "
"epilogue peel iters set to vf/2 because "
"peeling for alignment is unknown.\n");
/* If peeled iterations are unknown, count a taken branch and a not taken
branch per peeled loop. Even if scalar loop iterations are known,
vector iterations are not known since peeled prologue iterations are
not known. Hence guards remain the same. */
(void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
NULL, 0, vect_prologue);
(void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
NULL, 0, vect_prologue);
(void) add_stmt_cost (target_cost_data, 1, cond_branch_taken,
NULL, 0, vect_epilogue);
(void) add_stmt_cost (target_cost_data, 1, cond_branch_not_taken,
NULL, 0, vect_epilogue);
stmt_info_for_cost *si;
int j;
FOR_EACH_VEC_ELT (LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo), j, si)
{
struct _stmt_vec_info *stmt_info
= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
(void) add_stmt_cost (target_cost_data,
si->count * peel_iters_prologue,
si->kind, stmt_info, si->misalign,
vect_prologue);
(void) add_stmt_cost (target_cost_data,
si->count * peel_iters_epilogue,
si->kind, stmt_info, si->misalign,
vect_epilogue);
}
}
else
{
stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
stmt_info_for_cost *si;
int j;
void *data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
prologue_cost_vec.create (2);
epilogue_cost_vec.create (2);
peel_iters_prologue = npeel;
(void) vect_get_known_peeling_cost (loop_vinfo, peel_iters_prologue,
&peel_iters_epilogue,
&LOOP_VINFO_SCALAR_ITERATION_COST
(loop_vinfo),
&prologue_cost_vec,
&epilogue_cost_vec);
FOR_EACH_VEC_ELT (prologue_cost_vec, j, si)
{
struct _stmt_vec_info *stmt_info
= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
(void) add_stmt_cost (data, si->count, si->kind, stmt_info,
si->misalign, vect_prologue);
}
FOR_EACH_VEC_ELT (epilogue_cost_vec, j, si)
{
struct _stmt_vec_info *stmt_info
= si->stmt ? vinfo_for_stmt (si->stmt) : NULL;
(void) add_stmt_cost (data, si->count, si->kind, stmt_info,
si->misalign, vect_epilogue);
}
prologue_cost_vec.release ();
epilogue_cost_vec.release ();
}
/* FORNOW: The scalar outside cost is incremented in one of the
following ways:
1. The vectorizer checks for alignment and aliasing and generates
a condition that allows dynamic vectorization. A cost model
check is ANDed with the versioning condition. Hence the scalar code
path now has the added cost of the versioning check.
if (cost > th & versioning_check)
jmp to vector code
Hence the run-time scalar cost is incremented by a not-taken branch cost.
2. The vectorizer then checks if a prologue is required. If the
cost model check was not done before during versioning, it has to
be done before the prologue check.
if (cost <= th)
prologue = scalar_iters
if (prologue == 0)
jmp to vector code
else
execute prologue
if (prologue == num_iters)
go to exit
Hence the run-time scalar cost is incremented by a taken branch,
plus a not-taken branch, plus a taken branch cost.
3. The vectorizer then checks if an epilogue is required. If the
cost model check was not done before during prologue check, it
has to be done with the epilogue check.
if (prologue == 0)
jmp to vector code
else
execute prologue
if (prologue == num_iters)
go to exit
vector code:
if ((cost <= th) | (scalar_iters-prologue-epilogue == 0))
jmp to epilogue
Hence the run-time scalar cost should be incremented by 2 taken
branches.
TODO: The back end may reorder the BBS's differently and reverse
conditions/branch directions. Change the estimates below to
something more reasonable. */
/* If the number of iterations is known and we do not do versioning, we can
decide whether to vectorize at compile time. Hence the scalar version
does not carry cost model guard costs. */
if (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
|| LOOP_REQUIRES_VERSIONING (loop_vinfo))
{
/* Cost model check occurs at versioning. */
if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
scalar_outside_cost += vect_get_stmt_cost (cond_branch_not_taken);
else
{
/* Cost model check occurs at prologue generation. */
if (LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) < 0)
scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken)
+ vect_get_stmt_cost (cond_branch_not_taken);
/* Cost model check occurs at epilogue generation. */
else
scalar_outside_cost += 2 * vect_get_stmt_cost (cond_branch_taken);
}
}
/* Complete the target-specific cost calculations. */
finish_cost (LOOP_VINFO_TARGET_COST_DATA (loop_vinfo), &vec_prologue_cost,
&vec_inside_cost, &vec_epilogue_cost);
vec_outside_cost = (int)(vec_prologue_cost + vec_epilogue_cost);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location, "Cost model analysis: \n");
dump_printf (MSG_NOTE, " Vector inside of loop cost: %d\n",
vec_inside_cost);
dump_printf (MSG_NOTE, " Vector prologue cost: %d\n",
vec_prologue_cost);
dump_printf (MSG_NOTE, " Vector epilogue cost: %d\n",
vec_epilogue_cost);
dump_printf (MSG_NOTE, " Scalar iteration cost: %d\n",
scalar_single_iter_cost);
dump_printf (MSG_NOTE, " Scalar outside cost: %d\n",
scalar_outside_cost);
dump_printf (MSG_NOTE, " Vector outside cost: %d\n",
vec_outside_cost);
dump_printf (MSG_NOTE, " prologue iterations: %d\n",
peel_iters_prologue);
dump_printf (MSG_NOTE, " epilogue iterations: %d\n",
peel_iters_epilogue);
}
/* Calculate the number of iterations required to make the vector version
profitable, relative to the loop bodies only. The following condition
must hold true:
SIC * niters + SOC > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC
where
SIC = scalar iteration cost, VIC = vector iteration cost,
VOC = vector outside cost, VF = vectorization factor,
PL_ITERS = prologue iterations, EP_ITERS= epilogue iterations
SOC = scalar outside cost for run time cost model check. */
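/* For example, with SIC = 4, VIC = 8, VF = 4, VOC = 20, SOC = 6 and no
peeling, the break-even point is niters = (20 - 6) * 4 / (4 * 4 - 8) = 7;
since both variants cost the same at exactly 7 iterations, the code
below rounds the result up to 8. */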
if ((scalar_single_iter_cost * assumed_vf) > (int) vec_inside_cost)
{
min_profitable_iters = ((vec_outside_cost - scalar_outside_cost)
* assumed_vf
- vec_inside_cost * peel_iters_prologue
- vec_inside_cost * peel_iters_epilogue);
if (min_profitable_iters <= 0)
min_profitable_iters = 0;
else
{
min_profitable_iters /= ((scalar_single_iter_cost * assumed_vf)
- vec_inside_cost);
if ((scalar_single_iter_cost * assumed_vf * min_profitable_iters)
<= (((int) vec_inside_cost * min_profitable_iters)
+ (((int) vec_outside_cost - scalar_outside_cost)
* assumed_vf)))
min_profitable_iters++;
}
}
/* The vector version will never be profitable. */
else
{
if (LOOP_VINFO_LOOP (loop_vinfo)->force_vectorize)
warning_at (vect_location, OPT_Wopenmp_simd, "vectorization "
"did not happen for a simd loop");
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"cost model: the vector iteration cost = %d "
"divided by the scalar iteration cost = %d "
"is greater or equal to the vectorization factor = %d"
".\n",
vec_inside_cost, scalar_single_iter_cost, assumed_vf);
*ret_min_profitable_niters = -1;
*ret_min_profitable_estimate = -1;
return;
}
dump_printf (MSG_NOTE,
" Calculated minimum iters for profitability: %d\n",
min_profitable_iters);
if (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
&& min_profitable_iters < (assumed_vf + peel_iters_prologue))
/* We want the vectorized loop to execute at least once. */
min_profitable_iters = assumed_vf + peel_iters_prologue;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
" Runtime profitability threshold = %d\n",
min_profitable_iters);
*ret_min_profitable_niters = min_profitable_iters;
/* Calculate the number of iterations required to make the vector version
profitable, relative to the loop bodies only.
The non-vectorized variant costs SIC * niters and it must win over the
vector variant on the expected loop trip count. The following condition
must hold true:
SIC * niters > VIC * ((niters-PL_ITERS-EP_ITERS)/VF) + VOC + SOC */
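/* Continuing the example above, the static threshold is
(20 + 6) * 4 / (4 * 4 - 8) = 13, which is then clamped below to be at
least the runtime threshold computed earlier. */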
if (vec_outside_cost <= 0)
min_profitable_estimate = 0;
else
{
min_profitable_estimate = ((vec_outside_cost + scalar_outside_cost)
* assumed_vf
- vec_inside_cost * peel_iters_prologue
- vec_inside_cost * peel_iters_epilogue)
/ ((scalar_single_iter_cost * assumed_vf)
- vec_inside_cost);
}
min_profitable_estimate = MAX (min_profitable_estimate, min_profitable_iters);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
" Static estimate profitability threshold = %d\n",
min_profitable_estimate);
*ret_min_profitable_estimate = min_profitable_estimate;
}
/* Writes into SEL a mask for a vec_perm, equivalent to a vec_shr by OFFSET
vector elements (not bits) for a vector with NELT elements. */
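/* For example, OFFSET = 2 with NELT = 8 pushes the stepped pattern
{2, 3, 4}, which vec_perm_indices expands to the full selector
{2, 3, 4, 5, 6, 7, 8, 9}; indices >= NELT select from the second
input vector. */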
static void
calc_vec_perm_mask_for_shift (unsigned int offset, unsigned int nelt,
vec_perm_builder *sel)
{
/* The encoding is a single stepped pattern. Any wrap-around is handled
by vec_perm_indices. */
sel->new_vector (nelt, 1, 3);
for (unsigned int i = 0; i < 3; i++)
sel->quick_push (i + offset);
}
/* Checks whether the target supports whole-vector shifts for vectors of mode
MODE. This is the case if _either_ the platform handles vec_shr_optab, _or_
it supports vec_perm_const with masks for all necessary shift amounts. */
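/* For example, for a vector of 8 elements this requires selectors for
shifts by 4, 2 and 1 elements, the sequence used to reduce a vector by
repeated halving. */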
static bool
have_whole_vector_shift (machine_mode mode)
{
if (optab_handler (vec_shr_optab, mode) != CODE_FOR_nothing)
return true;
/* Variable-length vectors should be handled via the optab. */
unsigned int nelt;
if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
return false;
vec_perm_builder sel;
vec_perm_indices indices;
for (unsigned int i = nelt / 2; i >= 1; i /= 2)
{
calc_vec_perm_mask_for_shift (i, nelt, &sel);
indices.new_vector (sel, 2, nelt);
if (!can_vec_perm_const_p (mode, indices, false))
return false;
}
return true;
}
/* TODO: Close the dependency between vect_model_*_cost and vectorizable_*
functions. Design this better to avoid maintenance issues. */
/* Function vect_model_reduction_cost.
Models cost for a reduction operation, including the vector ops
generated within the strip-mine loop, the initial definition before
the loop, and the epilogue code that must be generated. */
static void
vect_model_reduction_cost (stmt_vec_info stmt_info, internal_fn reduc_fn,
int ncopies)
{
int prologue_cost = 0, epilogue_cost = 0, inside_cost;
enum tree_code code;
optab optab;
tree vectype;
gimple *orig_stmt;
machine_mode mode;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = NULL;
void *target_cost_data;
if (loop_vinfo)
{
loop = LOOP_VINFO_LOOP (loop_vinfo);
target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
}
else
target_cost_data = BB_VINFO_TARGET_COST_DATA (STMT_VINFO_BB_VINFO (stmt_info));
/* Condition reductions generate two reductions in the loop. */
vect_reduction_type reduction_type
= STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
if (reduction_type == COND_REDUCTION)
ncopies *= 2;
vectype = STMT_VINFO_VECTYPE (stmt_info);
mode = TYPE_MODE (vectype);
orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
if (!orig_stmt)
orig_stmt = STMT_VINFO_STMT (stmt_info);
code = gimple_assign_rhs_code (orig_stmt);
if (reduction_type == EXTRACT_LAST_REDUCTION
|| reduction_type == FOLD_LEFT_REDUCTION)
{
/* No extra instructions needed in the prologue. */
prologue_cost = 0;
if (reduction_type == EXTRACT_LAST_REDUCTION || reduc_fn != IFN_LAST)
/* Count one reduction-like operation per vector. */
inside_cost = add_stmt_cost (target_cost_data, ncopies, vec_to_scalar,
stmt_info, 0, vect_body);
else
{
/* Use NELEMENTS extracts and NELEMENTS scalar ops. */
unsigned int nelements = ncopies * vect_nunits_for_cost (vectype);
inside_cost = add_stmt_cost (target_cost_data, nelements,
vec_to_scalar, stmt_info, 0,
vect_body);
inside_cost += add_stmt_cost (target_cost_data, nelements,
scalar_stmt, stmt_info, 0,
vect_body);
}
}
else
{
/* Add in cost for initial definition.
For cond reduction we have four vectors: initial index, step,
initial result of the data reduction, initial value of the index
reduction. */
int prologue_stmts = reduction_type == COND_REDUCTION ? 4 : 1;
prologue_cost += add_stmt_cost (target_cost_data, prologue_stmts,
scalar_to_vec, stmt_info, 0,
vect_prologue);
/* Cost of reduction op inside loop. */
inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
stmt_info, 0, vect_body);
}
/* Determine cost of epilogue code.
We have a reduction operator that will reduce the vector in one statement.
Also requires scalar extract. */
if (!loop || !nested_in_vect_loop_p (loop, orig_stmt))
{
if (reduc_fn != IFN_LAST)
{
if (reduction_type == COND_REDUCTION)
{
/* An EQ stmt and a COND_EXPR stmt. */
epilogue_cost += add_stmt_cost (target_cost_data, 2,
vector_stmt, stmt_info, 0,
vect_epilogue);
/* Reduction of the max index and a reduction of the found
values. */
epilogue_cost += add_stmt_cost (target_cost_data, 2,
vec_to_scalar, stmt_info, 0,
vect_epilogue);
/* A broadcast of the max value. */
epilogue_cost += add_stmt_cost (target_cost_data, 1,
scalar_to_vec, stmt_info, 0,
vect_epilogue);
}
else
{
epilogue_cost += add_stmt_cost (target_cost_data, 1, vector_stmt,
stmt_info, 0, vect_epilogue);
epilogue_cost += add_stmt_cost (target_cost_data, 1,
vec_to_scalar, stmt_info, 0,
vect_epilogue);
}
}
else if (reduction_type == COND_REDUCTION)
{
unsigned estimated_nunits = vect_nunits_for_cost (vectype);
/* Extraction of scalar elements. */
epilogue_cost += add_stmt_cost (target_cost_data,
2 * estimated_nunits,
vec_to_scalar, stmt_info, 0,
vect_epilogue);
/* Scalar max reductions via COND_EXPR / MAX_EXPR. */
epilogue_cost += add_stmt_cost (target_cost_data,
2 * estimated_nunits - 3,
scalar_stmt, stmt_info, 0,
vect_epilogue);
}
else if (reduction_type == EXTRACT_LAST_REDUCTION
|| reduction_type == FOLD_LEFT_REDUCTION)
/* No extra instructions needed in the epilogue. */
;
else
{
int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
tree bitsize =
TYPE_SIZE (TREE_TYPE (gimple_assign_lhs (orig_stmt)));
int element_bitsize = tree_to_uhwi (bitsize);
int nelements = vec_size_in_bits / element_bitsize;
if (code == COND_EXPR)
code = MAX_EXPR;
optab = optab_for_tree_code (code, vectype, optab_default);
/* We have a whole vector shift available. */
if (optab != unknown_optab
&& VECTOR_MODE_P (mode)
&& optab_handler (optab, mode) != CODE_FOR_nothing
&& have_whole_vector_shift (mode))
{
/* Final reduction via vector shifts and the reduction operator.
Also requires scalar extract. */
epilogue_cost += add_stmt_cost (target_cost_data,
exact_log2 (nelements) * 2,
vector_stmt, stmt_info, 0,
vect_epilogue);
epilogue_cost += add_stmt_cost (target_cost_data, 1,
vec_to_scalar, stmt_info, 0,
vect_epilogue);
}
else
/* Use extracts and reduction op for final reduction. For N
elements, we have N extracts and N-1 reduction ops. */
epilogue_cost += add_stmt_cost (target_cost_data,
nelements + nelements - 1,
vector_stmt, stmt_info, 0,
vect_epilogue);
}
}
if (dump_enabled_p ())
dump_printf (MSG_NOTE,
"vect_model_reduction_cost: inside_cost = %d, "
"prologue_cost = %d, epilogue_cost = %d .\n", inside_cost,
prologue_cost, epilogue_cost);
}
/* Function vect_model_induction_cost.
Models cost for induction operations. */
static void
vect_model_induction_cost (stmt_vec_info stmt_info, int ncopies)
{
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
void *target_cost_data = LOOP_VINFO_TARGET_COST_DATA (loop_vinfo);
unsigned inside_cost, prologue_cost;
if (PURE_SLP_STMT (stmt_info))
return;
/* loop cost for vec_loop. */
inside_cost = add_stmt_cost (target_cost_data, ncopies, vector_stmt,
stmt_info, 0, vect_body);
/* prologue cost for vec_init and vec_step. */
prologue_cost = add_stmt_cost (target_cost_data, 2, scalar_to_vec,
stmt_info, 0, vect_prologue);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"vect_model_induction_cost: inside_cost = %d, "
"prologue_cost = %d .\n", inside_cost, prologue_cost);
}
/* Function get_initial_def_for_reduction
Input:
STMT - a stmt that performs a reduction operation in the loop.
INIT_VAL - the initial value of the reduction variable
Output:
ADJUSTMENT_DEF - a tree that holds a value to be added to the final result
of the reduction (used for adjusting the epilog - see below).
Return a vector variable, initialized according to the operation that STMT
performs. This vector will be used as the initial value of the
vector of partial results.
Option1 (adjust in epilog): Initialize the vector as follows:
add/bit or/xor: [0,0,...,0,0]
mult/bit and: [1,1,...,1,1]
min/max/cond_expr: [init_val,init_val,..,init_val,init_val]
and when necessary (e.g. add/mult case) let the caller know
that it needs to adjust the result by init_val.
Option2: Initialize the vector as follows:
add/bit or/xor: [init_val,0,0,...,0]
mult/bit and: [init_val,1,1,...,1]
min/max/cond_expr: [init_val,init_val,...,init_val]
and no adjustments are needed.
For example, for the following code:
s = init_val;
for (i=0;i<n;i++)
s = s + a[i];
STMT is 's = s + a[i]', and the reduction variable is 's'.
For a vector of 4 units, we want to return either [0,0,0,init_val],
or [0,0,0,0] and let the caller know that it needs to adjust
the result at the end by 'init_val'.
FORNOW, we use the 'adjust in epilog' scheme (Option1) when
ADJUSTMENT_DEF is not NULL, because the initialization vector is then
simpler (the same element in all entries), and Option2 otherwise.
A cost model should help decide between these two schemes. */
tree
get_initial_def_for_reduction (gimple *stmt, tree init_val,
tree *adjustment_def)
{
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_vinfo);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree scalar_type = TREE_TYPE (init_val);
tree vectype = get_vectype_for_scalar_type (scalar_type);
enum tree_code code = gimple_assign_rhs_code (stmt);
tree def_for_init;
tree init_def;
bool nested_in_vect_loop = false;
REAL_VALUE_TYPE real_init_val = dconst0;
int int_init_val = 0;
gimple *def_stmt = NULL;
gimple_seq stmts = NULL;
gcc_assert (vectype);
gcc_assert (POINTER_TYPE_P (scalar_type) || INTEGRAL_TYPE_P (scalar_type)
|| SCALAR_FLOAT_TYPE_P (scalar_type));
if (nested_in_vect_loop_p (loop, stmt))
nested_in_vect_loop = true;
else
gcc_assert (loop == (gimple_bb (stmt))->loop_father);
/* In case of double reduction we only create a vector variable to be put
in the reduction phi node. The actual statement creation is done in
vect_create_epilog_for_reduction. */
if (adjustment_def && nested_in_vect_loop
&& TREE_CODE (init_val) == SSA_NAME
&& (def_stmt = SSA_NAME_DEF_STMT (init_val))
&& gimple_code (def_stmt) == GIMPLE_PHI
&& flow_bb_inside_loop_p (loop, gimple_bb (def_stmt))
&& vinfo_for_stmt (def_stmt)
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_stmt))
== vect_double_reduction_def)
{
*adjustment_def = NULL;
return vect_create_destination_var (init_val, vectype);
}
vect_reduction_type reduction_type
= STMT_VINFO_VEC_REDUCTION_TYPE (stmt_vinfo);
/* In case of a nested reduction do not use an adjustment def, as
that case is not handled correctly by the epilogue generation
when ncopies is not one. */
if (adjustment_def && nested_in_vect_loop)
{
*adjustment_def = NULL;
return vect_get_vec_def_for_operand (init_val, stmt);
}
switch (code)
{
case WIDEN_SUM_EXPR:
case DOT_PROD_EXPR:
case SAD_EXPR:
case PLUS_EXPR:
case MINUS_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case MULT_EXPR:
case BIT_AND_EXPR:
{
/* ADJUSTMENT_DEF is NULL when called from
vect_create_epilog_for_reduction to vectorize double reduction. */
if (adjustment_def)
*adjustment_def = init_val;
if (code == MULT_EXPR)
{
real_init_val = dconst1;
int_init_val = 1;
}
if (code == BIT_AND_EXPR)
int_init_val = -1;
if (SCALAR_FLOAT_TYPE_P (scalar_type))
def_for_init = build_real (scalar_type, real_init_val);
else
def_for_init = build_int_cst (scalar_type, int_init_val);
if (adjustment_def)
/* Option1: the first element is '0' or '1' as well. */
init_def = gimple_build_vector_from_val (&stmts, vectype,
def_for_init);
else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant ())
{
/* Option2 (variable length): the first element is INIT_VAL. */
init_def = build_vector_from_val (vectype, def_for_init);
gcall *call = gimple_build_call_internal (IFN_VEC_SHL_INSERT,
2, init_def, init_val);
init_def = make_ssa_name (vectype);
gimple_call_set_lhs (call, init_def);
gimple_seq_add_stmt (&stmts, call);
}
else
{
/* Option2: the first element is INIT_VAL. */
tree_vector_builder elts (vectype, 1, 2);
elts.quick_push (init_val);
elts.quick_push (def_for_init);
init_def = gimple_build_vector (&stmts, &elts);
}
}
break;
case MIN_EXPR:
case MAX_EXPR:
case COND_EXPR:
{
if (adjustment_def)
{
*adjustment_def = NULL_TREE;
if (reduction_type != COND_REDUCTION
&& reduction_type != EXTRACT_LAST_REDUCTION)
{
init_def = vect_get_vec_def_for_operand (init_val, stmt);
break;
}
}
init_val = gimple_convert (&stmts, TREE_TYPE (vectype), init_val);
init_def = gimple_build_vector_from_val (&stmts, vectype, init_val);
}
break;
default:
gcc_unreachable ();
}
if (stmts)
gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
return init_def;
}
/* Get at the initial defs for the reduction PHIs in SLP_NODE.
NUMBER_OF_VECTORS is the number of vector defs to create.
If NEUTRAL_OP is nonnull, introducing extra elements of that
value will not change the result. */
static void
get_initial_defs_for_reduction (slp_tree slp_node,
vec<tree> *vec_oprnds,
unsigned int number_of_vectors,
bool reduc_chain, tree neutral_op)
{
vec<gimple *> stmts = SLP_TREE_SCALAR_STMTS (slp_node);
gimple *stmt = stmts[0];
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
unsigned HOST_WIDE_INT nunits;
unsigned j, number_of_places_left_in_vector;
tree vector_type;
tree vop;
int group_size = stmts.length ();
unsigned int vec_num, i;
unsigned number_of_copies = 1;
vec<tree> voprnds;
voprnds.create (number_of_vectors);
struct loop *loop;
auto_vec<tree, 16> permute_results;
vector_type = STMT_VINFO_VECTYPE (stmt_vinfo);
gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
loop = (gimple_bb (stmt))->loop_father;
gcc_assert (loop);
edge pe = loop_preheader_edge (loop);
gcc_assert (!reduc_chain || neutral_op);
/* NUMBER_OF_COPIES is the number of times we need to use the same values in
created vectors. It is greater than 1 if unrolling is performed.
For example, we have two scalar operands, s1 and s2 (e.g., group of
strided accesses of size two), while NUNITS is four (i.e., four scalars
of this type can be packed in a vector). The output vector will contain
two copies of each scalar operand: {s1, s2, s1, s2}. (NUMBER_OF_COPIES
will be 2).
If GROUP_SIZE > NUNITS, the scalars will be split into several vectors
containing the operands.
For example, NUNITS is four as before, and the group size is 8
(s1, s2, ..., s8). We will create two vectors {s1, s2, s3, s4} and
{s5, s6, s7, s8}. */
if (!TYPE_VECTOR_SUBPARTS (vector_type).is_constant (&nunits))
nunits = group_size;
number_of_copies = nunits * number_of_vectors / group_size;
number_of_places_left_in_vector = nunits;
bool constant_p = true;
tree_vector_builder elts (vector_type, nunits, 1);
elts.quick_grow (nunits);
for (j = 0; j < number_of_copies; j++)
{
for (i = group_size - 1; stmts.iterate (i, &stmt); i--)
{
tree op;
/* Get the def before the loop. In reduction chain we have only
one initial value. */
if ((j != (number_of_copies - 1)
|| (reduc_chain && i != 0))
&& neutral_op)
op = neutral_op;
else
op = PHI_ARG_DEF_FROM_EDGE (stmt, pe);
/* Create 'vect_ = {op0,op1,...,opn}'. */
number_of_places_left_in_vector--;
elts[number_of_places_left_in_vector] = op;
if (!CONSTANT_CLASS_P (op))
constant_p = false;
if (number_of_places_left_in_vector == 0)
{
gimple_seq ctor_seq = NULL;
tree init;
if (constant_p && !neutral_op
? multiple_p (TYPE_VECTOR_SUBPARTS (vector_type), nunits)
: known_eq (TYPE_VECTOR_SUBPARTS (vector_type), nunits))
/* Build the vector directly from ELTS. */
init = gimple_build_vector (&ctor_seq, &elts);
else if (neutral_op)
{
/* Build a vector of the neutral value and shift the
other elements into place. */
init = gimple_build_vector_from_val (&ctor_seq, vector_type,
neutral_op);
int k = nunits;
while (k > 0 && elts[k - 1] == neutral_op)
k -= 1;
while (k > 0)
{
k -= 1;
gcall *call = gimple_build_call_internal
(IFN_VEC_SHL_INSERT, 2, init, elts[k]);
init = make_ssa_name (vector_type);
gimple_call_set_lhs (call, init);
gimple_seq_add_stmt (&ctor_seq, call);
}
}
else
{
/* First time round, duplicate ELTS to fill the
required number of vectors, then cherry pick the
appropriate result for each iteration. */
if (vec_oprnds->is_empty ())
duplicate_and_interleave (&ctor_seq, vector_type, elts,
number_of_vectors,
permute_results);
init = permute_results[number_of_vectors - j - 1];
}
if (ctor_seq != NULL)
gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
voprnds.quick_push (init);
number_of_places_left_in_vector = nunits;
elts.new_vector (vector_type, nunits, 1);
elts.quick_grow (nunits);
constant_p = true;
}
}
}
/* The vectors were created in reverse order, so reverse them
here. */
vec_num = voprnds.length ();
for (j = vec_num; j != 0; j--)
{
vop = voprnds[j - 1];
vec_oprnds->quick_push (vop);
}
voprnds.release ();
/* In case that VF is greater than the unrolling factor needed for the SLP
group of stmts, NUMBER_OF_VECTORS to be created is greater than
NUMBER_OF_SCALARS/NUNITS or NUNITS/NUMBER_OF_SCALARS, and hence we have
to replicate the vectors. */
tree neutral_vec = NULL;
while (number_of_vectors > vec_oprnds->length ())
{
if (neutral_op)
{
if (!neutral_vec)
{
gimple_seq ctor_seq = NULL;
neutral_vec = gimple_build_vector_from_val
(&ctor_seq, vector_type, neutral_op);
if (ctor_seq != NULL)
gsi_insert_seq_on_edge_immediate (pe, ctor_seq);
}
vec_oprnds->quick_push (neutral_vec);
}
else
{
for (i = 0; vec_oprnds->iterate (i, &vop) && i < vec_num; i++)
vec_oprnds->quick_push (vop);
}
}
}
/* Function vect_create_epilog_for_reduction
Create code at the loop-epilog to finalize the result of a reduction
computation.
VECT_DEFS is list of vector of partial results, i.e., the lhs's of vector
reduction statements.
STMT is the scalar reduction stmt that is being vectorized.
NCOPIES is > 1 in case the vectorization factor (VF) is bigger than the
number of elements that we can fit in a vectype (nunits). In this case
we have to generate more than one vector stmt - i.e - we need to "unroll"
the vector stmt by a factor VF/nunits. For more details see documentation
in vectorizable_operation.
REDUC_FN is the internal function for the epilog reduction.
REDUCTION_PHIS is a list of the phi-nodes that carry the reduction
computation.
REDUC_INDEX is the index of the operand in the right hand side of the
statement that is defined by REDUCTION_PHI.
DOUBLE_REDUC is TRUE if double reduction phi nodes should be handled.
SLP_NODE is an SLP node containing a group of reduction statements. The
first one in this group is STMT.
INDUC_VAL is for INTEGER_INDUC_COND_REDUCTION the value to use for the case
when the COND_EXPR is never true in the loop. For MAX_EXPR, it needs to
be smaller than any value of the IV in the loop, for MIN_EXPR larger than
any value of the IV in the loop.
INDUC_CODE is the code for epilog reduction if INTEGER_INDUC_COND_REDUCTION.
NEUTRAL_OP is the value given by neutral_op_for_slp_reduction; it is
null if this is not an SLP reduction
This function:
1. Creates the reduction def-use cycles: sets the arguments for
REDUCTION_PHIS:
The loop-entry argument is the vectorized initial-value of the reduction.
The loop-latch argument is taken from VECT_DEFS - the vector of partial
sums.
2. "Reduces" each vector of partial results VECT_DEFS into a single result,
by calling the function specified by REDUC_FN if available, or by
other means (whole-vector shifts or a scalar loop).
The function also creates a new phi node at the loop exit to preserve
loop-closed form, as illustrated below.
The flow at the entry to this function:
loop:
vec_def = phi <null, null> # REDUCTION_PHI
VECT_DEF = vector_stmt # vectorized form of STMT
s_loop = scalar_stmt # (scalar) STMT
loop_exit:
s_out0 = phi <s_loop> # (scalar) EXIT_PHI
use <s_out0>
use <s_out0>
The above is transformed by this function into:
loop:
vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
VECT_DEF = vector_stmt # vectorized form of STMT
s_loop = scalar_stmt # (scalar) STMT
loop_exit:
s_out0 = phi <s_loop> # (scalar) EXIT_PHI
v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
v_out2 = reduce <v_out1>
s_out3 = extract_field <v_out2, 0>
s_out4 = adjust_result <s_out3>
use <s_out4>
use <s_out4>
*/
static void
vect_create_epilog_for_reduction (vec<tree> vect_defs, gimple *stmt,
gimple *reduc_def_stmt,
int ncopies, internal_fn reduc_fn,
vec<gimple *> reduction_phis,
bool double_reduc,
slp_tree slp_node,
slp_instance slp_node_instance,
tree induc_val, enum tree_code induc_code,
tree neutral_op)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
stmt_vec_info prev_phi_info;
tree vectype;
machine_mode mode;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *outer_loop = NULL;
basic_block exit_bb;
tree scalar_dest;
tree scalar_type;
gimple *new_phi = NULL, *phi;
gimple_stmt_iterator exit_gsi;
tree vec_dest;
tree new_temp = NULL_TREE, new_dest, new_name, new_scalar_dest;
gimple *epilog_stmt = NULL;
enum tree_code code = gimple_assign_rhs_code (stmt);
gimple *exit_phi;
tree bitsize;
tree adjustment_def = NULL;
tree vec_initial_def = NULL;
tree expr, def, initial_def = NULL;
tree orig_name, scalar_result;
imm_use_iterator imm_iter, phi_imm_iter;
use_operand_p use_p, phi_use_p;
gimple *use_stmt, *orig_stmt, *reduction_phi = NULL;
bool nested_in_vect_loop = false;
auto_vec<gimple *> new_phis;
auto_vec<gimple *> inner_phis;
enum vect_def_type dt = vect_unknown_def_type;
int j, i;
auto_vec<tree> scalar_results;
unsigned int group_size = 1, k, ratio;
auto_vec<tree> vec_initial_defs;
auto_vec<gimple *> phis;
bool slp_reduc = false;
bool direct_slp_reduc;
tree new_phi_result;
gimple *inner_phi = NULL;
tree induction_index = NULL_TREE;
if (slp_node)
group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
if (nested_in_vect_loop_p (loop, stmt))
{
outer_loop = loop;
loop = loop->inner;
nested_in_vect_loop = true;
gcc_assert (!slp_node);
}
vectype = STMT_VINFO_VECTYPE (stmt_info);
gcc_assert (vectype);
mode = TYPE_MODE (vectype);
/* 1. Create the reduction def-use cycle:
Set the arguments of REDUCTION_PHIS, i.e., transform
loop:
vec_def = phi <null, null> # REDUCTION_PHI
VECT_DEF = vector_stmt # vectorized form of STMT
...
into:
loop:
vec_def = phi <vec_init, VECT_DEF> # REDUCTION_PHI
VECT_DEF = vector_stmt # vectorized form of STMT
...
(in case of SLP, do it for all the phis). */
/* Get the loop-entry arguments. */
enum vect_def_type initial_def_dt = vect_unknown_def_type;
if (slp_node)
{
unsigned vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
vec_initial_defs.reserve (vec_num);
get_initial_defs_for_reduction (slp_node_instance->reduc_phis,
&vec_initial_defs, vec_num,
GROUP_FIRST_ELEMENT (stmt_info),
neutral_op);
}
else
{
/* Get at the scalar def before the loop, that defines the initial value
of the reduction variable. */
gimple *def_stmt;
initial_def = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
loop_preheader_edge (loop));
/* Optimize: for REDUC_MAX, if initial_def is smaller than the base
(induc_val) and we cannot use zero for induc_val, use initial_def as
induc_val instead. Similarly for REDUC_MIN when initial_def is larger
than the base. */
if (TREE_CODE (initial_def) == INTEGER_CST
&& (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== INTEGER_INDUC_COND_REDUCTION)
&& !integer_zerop (induc_val)
&& ((induc_code == MAX_EXPR
&& tree_int_cst_lt (initial_def, induc_val))
|| (induc_code == MIN_EXPR
&& tree_int_cst_lt (induc_val, initial_def))))
induc_val = initial_def;
vect_is_simple_use (initial_def, loop_vinfo, &def_stmt, &initial_def_dt);
vec_initial_def = get_initial_def_for_reduction (stmt, initial_def,
&adjustment_def);
vec_initial_defs.create (1);
vec_initial_defs.quick_push (vec_initial_def);
}
/* Set phi nodes arguments. */
FOR_EACH_VEC_ELT (reduction_phis, i, phi)
{
tree vec_init_def = vec_initial_defs[i];
tree def = vect_defs[i];
for (j = 0; j < ncopies; j++)
{
if (j != 0)
{
phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
if (nested_in_vect_loop)
vec_init_def
= vect_get_vec_def_for_stmt_copy (initial_def_dt,
vec_init_def);
}
/* Set the loop-entry arg of the reduction-phi. */
if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== INTEGER_INDUC_COND_REDUCTION)
{
/* Initialise the reduction phi to zero. This prevents non-zero
initial values from interfering with the reduction op. */
gcc_assert (ncopies == 1);
gcc_assert (i == 0);
tree vec_init_def_type = TREE_TYPE (vec_init_def);
tree induc_val_vec
= build_vector_from_val (vec_init_def_type, induc_val);
add_phi_arg (as_a <gphi *> (phi), induc_val_vec,
loop_preheader_edge (loop), UNKNOWN_LOCATION);
}
else
add_phi_arg (as_a <gphi *> (phi), vec_init_def,
loop_preheader_edge (loop), UNKNOWN_LOCATION);
/* Set the loop-latch arg for the reduction-phi. */
if (j > 0)
def = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type, def);
add_phi_arg (as_a <gphi *> (phi), def, loop_latch_edge (loop),
UNKNOWN_LOCATION);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"transform reduction: created def-use cycle: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, SSA_NAME_DEF_STMT (def), 0);
}
}
}
/* For cond reductions we want to create a new vector (INDEX_COND_EXPR)
which is updated with the current index of the loop for every match of
the original loop's cond_expr (VEC_STMT). This results in a vector
containing the last time the condition passed for that vector lane.
The first match will be a 1 to allow 0 to be used for non-matching
indexes. If there are no matches at all then the vector will be all
zeroes. */
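/* An illustrative trace: with four lanes the induction variable takes
the values {1,2,3,4}, {5,6,7,8}, ... on successive vector iterations,
so a lane whose condition last matched in the second vector iteration
ends up holding a value from {5,...,8}, while a lane that never
matched holds 0. */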
if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION)
{
tree indx_before_incr, indx_after_incr;
poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype);
gimple *vec_stmt = STMT_VINFO_VEC_STMT (stmt_info);
gcc_assert (gimple_assign_rhs_code (vec_stmt) == VEC_COND_EXPR);
int scalar_precision
= GET_MODE_PRECISION (SCALAR_TYPE_MODE (TREE_TYPE (vectype)));
tree cr_index_scalar_type = make_unsigned_type (scalar_precision);
tree cr_index_vector_type = build_vector_type
(cr_index_scalar_type, TYPE_VECTOR_SUBPARTS (vectype));
/* First we create a simple vector induction variable which starts
with the values {1,2,3,...} (SERIES_VECT) and increments by the
vector size (STEP). */
/* Create a {1,2,3,...} vector. */
tree series_vect = build_index_vector (cr_index_vector_type, 1, 1);
/* Create a vector of the step value. */
tree step = build_int_cst (cr_index_scalar_type, nunits_out);
tree vec_step = build_vector_from_val (cr_index_vector_type, step);
/* Create an induction variable. */
gimple_stmt_iterator incr_gsi;
bool insert_after;
standard_iv_increment_position (loop, &incr_gsi, &insert_after);
create_iv (series_vect, vec_step, NULL_TREE, loop, &incr_gsi,
insert_after, &indx_before_incr, &indx_after_incr);
/* Next create a new phi node vector (NEW_PHI_TREE) which starts
filled with zeros (VEC_ZERO). */
/* Create a vector of 0s. */
tree zero = build_zero_cst (cr_index_scalar_type);
tree vec_zero = build_vector_from_val (cr_index_vector_type, zero);
/* Create a vector phi node. */
tree new_phi_tree = make_ssa_name (cr_index_vector_type);
new_phi = create_phi_node (new_phi_tree, loop->header);
set_vinfo_for_stmt (new_phi,
new_stmt_vec_info (new_phi, loop_vinfo));
add_phi_arg (as_a <gphi *> (new_phi), vec_zero,
loop_preheader_edge (loop), UNKNOWN_LOCATION);
/* Now take the condition from the loop's original cond_expr
(VEC_STMT) and produce a new cond_expr (INDEX_COND_EXPR) which, for
every match, uses values from the induction variable
(INDEX_BEFORE_INCR) and otherwise uses values from the phi node
(NEW_PHI_TREE).
Finally, we update the phi (NEW_PHI_TREE) to take the value of
the new cond_expr (INDEX_COND_EXPR). */
/* Duplicate the condition from vec_stmt. */
tree ccompare = unshare_expr (gimple_assign_rhs1 (vec_stmt));
/* Create a conditional where the condition is taken from vec_stmt
(CCOMPARE), the "then" value is the induction index
(INDEX_BEFORE_INCR) and the "else" value is the phi (NEW_PHI_TREE). */
tree index_cond_expr = build3 (VEC_COND_EXPR, cr_index_vector_type,
ccompare, indx_before_incr,
new_phi_tree);
induction_index = make_ssa_name (cr_index_vector_type);
gimple *index_condition = gimple_build_assign (induction_index,
index_cond_expr);
gsi_insert_before (&incr_gsi, index_condition, GSI_SAME_STMT);
stmt_vec_info index_vec_info = new_stmt_vec_info (index_condition,
loop_vinfo);
STMT_VINFO_VECTYPE (index_vec_info) = cr_index_vector_type;
set_vinfo_for_stmt (index_condition, index_vec_info);
/* Update the phi with the vec cond. */
add_phi_arg (as_a <gphi *> (new_phi), induction_index,
loop_latch_edge (loop), UNKNOWN_LOCATION);
}
/* 2. Create epilog code.
The reduction epilog code operates across the elements of the vector
of partial results computed by the vectorized loop.
The reduction epilog code consists of:
step 1: compute the scalar result in a vector (v_out2)
step 2: extract the scalar result (s_out3) from the vector (v_out2)
step 3: adjust the scalar result (s_out3) if needed.
Step 1 can be accomplished using one the following three schemes:
(scheme 1) using reduc_fn, if available.
(scheme 2) using whole-vector shifts, if available.
(scheme 3) using a scalar loop. In this case steps 1+2 above are
combined.
The overall epilog code looks like this:
s_out0 = phi <s_loop> # original EXIT_PHI
v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
v_out2 = reduce <v_out1> # step 1
s_out3 = extract_field <v_out2, 0> # step 2
s_out4 = adjust_result <s_out3> # step 3
(step 3 is optional, and steps 1 and 2 may be combined).
Lastly, the uses of s_out0 are replaced by s_out4. */
/* 2.1 Create new loop-exit-phis to preserve loop-closed form:
v_out1 = phi <VECT_DEF>
Store them in NEW_PHIS. */
exit_bb = single_exit (loop)->dest;
prev_phi_info = NULL;
new_phis.create (vect_defs.length ());
FOR_EACH_VEC_ELT (vect_defs, i, def)
{
for (j = 0; j < ncopies; j++)
{
tree new_def = copy_ssa_name (def);
phi = create_phi_node (new_def, exit_bb);
set_vinfo_for_stmt (phi, new_stmt_vec_info (phi, loop_vinfo));
if (j == 0)
new_phis.quick_push (phi);
else
{
def = vect_get_vec_def_for_stmt_copy (dt, def);
STMT_VINFO_RELATED_STMT (prev_phi_info) = phi;
}
SET_PHI_ARG_DEF (phi, single_exit (loop)->dest_idx, def);
prev_phi_info = vinfo_for_stmt (phi);
}
}
/* The epilogue is created for the outer-loop, i.e., for the loop being
vectorized. Create exit phis for the outer loop. */
if (double_reduc)
{
loop = outer_loop;
exit_bb = single_exit (loop)->dest;
inner_phis.create (vect_defs.length ());
FOR_EACH_VEC_ELT (new_phis, i, phi)
{
tree new_result = copy_ssa_name (PHI_RESULT (phi));
gphi *outer_phi = create_phi_node (new_result, exit_bb);
SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
PHI_RESULT (phi));
set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
loop_vinfo));
inner_phis.quick_push (phi);
new_phis[i] = outer_phi;
prev_phi_info = vinfo_for_stmt (outer_phi);
while (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi)))
{
phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (phi));
new_result = copy_ssa_name (PHI_RESULT (phi));
outer_phi = create_phi_node (new_result, exit_bb);
SET_PHI_ARG_DEF (outer_phi, single_exit (loop)->dest_idx,
PHI_RESULT (phi));
set_vinfo_for_stmt (outer_phi, new_stmt_vec_info (outer_phi,
loop_vinfo));
STMT_VINFO_RELATED_STMT (prev_phi_info) = outer_phi;
prev_phi_info = vinfo_for_stmt (outer_phi);
}
}
}
exit_gsi = gsi_after_labels (exit_bb);
/* 2.2 Get the relevant tree-code to use in the epilog for schemes 2,3
(i.e. when reduc_fn is not available) and in the final adjustment
code (if needed). Also get the original scalar reduction variable as
defined in the loop. In case STMT is a "pattern-stmt" (i.e. - it
represents a reduction pattern), the tree-code and scalar-def are
taken from the original stmt that the pattern-stmt (STMT) replaces.
Otherwise (it is a regular reduction) - the tree-code and scalar-def
are taken from STMT. */
orig_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
if (!orig_stmt)
{
/* Regular reduction */
orig_stmt = stmt;
}
else
{
/* Reduction pattern */
stmt_vec_info stmt_vinfo = vinfo_for_stmt (orig_stmt);
gcc_assert (STMT_VINFO_IN_PATTERN_P (stmt_vinfo));
gcc_assert (STMT_VINFO_RELATED_STMT (stmt_vinfo) == stmt);
}
code = gimple_assign_rhs_code (orig_stmt);
/* For MINUS_EXPR the initial vector is [init_val,0,...,0], therefore,
partial results are added and not subtracted. */
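/* For example, for s -= a[i] with init_val 10, the vectorized loop
starts from [10,0,...,0] and subtracts a vector of A's elements on
each iteration; lane 0 ends up holding 10 minus its share of the
elements and every other lane holds a negated partial sum, so the
lanes must be combined with PLUS_EXPR to recover the scalar result. */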
if (code == MINUS_EXPR)
code = PLUS_EXPR;
scalar_dest = gimple_assign_lhs (orig_stmt);
scalar_type = TREE_TYPE (scalar_dest);
scalar_results.create (group_size);
new_scalar_dest = vect_create_destination_var (scalar_dest, NULL);
bitsize = TYPE_SIZE (scalar_type);
/* In case this is a reduction in an inner-loop while vectorizing an outer
loop - we don't need to extract a single scalar result at the end of the
inner-loop (unless it is double reduction, i.e., the use of reduction is
outside the outer-loop). The final vector of partial results will be used
in the vectorized outer-loop, or reduced to a scalar result at the end of
the outer-loop. */
if (nested_in_vect_loop && !double_reduc)
goto vect_finalize_reduction;
/* SLP reduction without reduction chain, e.g.,
# a1 = phi <a2, a0>
# b1 = phi <b2, b0>
a2 = operation (a1)
b2 = operation (b1) */
slp_reduc = (slp_node && !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)));
/* True if we should implement SLP_REDUC using native reduction operations
instead of scalar operations. */
direct_slp_reduc = (reduc_fn != IFN_LAST
&& slp_reduc
&& !TYPE_VECTOR_SUBPARTS (vectype).is_constant ());
/* In case of reduction chain, e.g.,
# a1 = phi <a3, a0>
a2 = operation (a1)
a3 = operation (a2),
we may end up with more than one vector result. Here we reduce them to
one vector. */
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)) || direct_slp_reduc)
{
tree first_vect = PHI_RESULT (new_phis[0]);
gassign *new_vec_stmt = NULL;
vec_dest = vect_create_destination_var (scalar_dest, vectype);
for (k = 1; k < new_phis.length (); k++)
{
gimple *next_phi = new_phis[k];
tree second_vect = PHI_RESULT (next_phi);
tree tem = make_ssa_name (vec_dest, new_vec_stmt);
new_vec_stmt = gimple_build_assign (tem, code,
first_vect, second_vect);
gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
first_vect = tem;
}
new_phi_result = first_vect;
if (new_vec_stmt)
{
new_phis.truncate (0);
new_phis.safe_push (new_vec_stmt);
}
}
/* Likewise if we couldn't use a single def-use cycle. */
else if (ncopies > 1)
{
gcc_assert (new_phis.length () == 1);
tree first_vect = PHI_RESULT (new_phis[0]);
gassign *new_vec_stmt = NULL;
vec_dest = vect_create_destination_var (scalar_dest, vectype);
gimple *next_phi = new_phis[0];
for (int k = 1; k < ncopies; ++k)
{
next_phi = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (next_phi));
tree second_vect = PHI_RESULT (next_phi);
tree tem = make_ssa_name (vec_dest, new_vec_stmt);
new_vec_stmt = gimple_build_assign (tem, code,
first_vect, second_vect);
gsi_insert_before (&exit_gsi, new_vec_stmt, GSI_SAME_STMT);
first_vect = tem;
}
new_phi_result = first_vect;
new_phis.truncate (0);
new_phis.safe_push (new_vec_stmt);
}
else
new_phi_result = PHI_RESULT (new_phis[0]);
if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
&& reduc_fn != IFN_LAST)
{
/* For condition reductions, we have a vector (NEW_PHI_RESULT) containing
various data values where the condition matched and another vector
(INDUCTION_INDEX) containing all the indexes of those matches. We
need to extract the last matching index (which will be the index with
highest value) and use this to index into the data vector.
For the case where there were no matches, the data vector will contain
all default values and the index vector will be all zeros. */
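/* For example: NEW_PHI_RESULT = {a, b, c, d} and
INDUCTION_INDEX = {3, 0, 6, 0} (only lanes 0 and 2 ever matched,
lane 2 most recently). The max index is 6, the EQ comparison selects
lane 2, the VEC_COND produces {0, 0, c, 0} and the final MAX
reduction extracts c. */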
/* Get various versions of the type of the vector of indexes. */
tree index_vec_type = TREE_TYPE (induction_index);
gcc_checking_assert (TYPE_UNSIGNED (index_vec_type));
tree index_scalar_type = TREE_TYPE (index_vec_type);
tree index_vec_cmp_type = build_same_sized_truth_vector_type
(index_vec_type);
/* Get an unsigned integer version of the type of the data vector. */
int scalar_precision
= GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
tree scalar_type_unsigned = make_unsigned_type (scalar_precision);
tree vectype_unsigned = build_vector_type
(scalar_type_unsigned, TYPE_VECTOR_SUBPARTS (vectype));
/* First we need to create a vector (ZERO_VEC) of zeros and another
vector (MAX_INDEX_VEC) filled with the last matching index, which we
can create using a MAX reduction and then expanding.
In the case where the loop never made any matches, the max index will
be zero. */
/* Vector of {0, 0, 0,...}. */
tree zero_vec = make_ssa_name (vectype);
tree zero_vec_rhs = build_zero_cst (vectype);
gimple *zero_vec_stmt = gimple_build_assign (zero_vec, zero_vec_rhs);
gsi_insert_before (&exit_gsi, zero_vec_stmt, GSI_SAME_STMT);
/* Find maximum value from the vector of found indexes. */
tree max_index = make_ssa_name (index_scalar_type);
gcall *max_index_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
1, induction_index);
gimple_call_set_lhs (max_index_stmt, max_index);
gsi_insert_before (&exit_gsi, max_index_stmt, GSI_SAME_STMT);
/* Vector of {max_index, max_index, max_index,...}. */
tree max_index_vec = make_ssa_name (index_vec_type);
tree max_index_vec_rhs = build_vector_from_val (index_vec_type,
max_index);
gimple *max_index_vec_stmt = gimple_build_assign (max_index_vec,
max_index_vec_rhs);
gsi_insert_before (&exit_gsi, max_index_vec_stmt, GSI_SAME_STMT);
/* Next we compare the new vector (MAX_INDEX_VEC) full of max indexes
with the vector (INDUCTION_INDEX) of found indexes, choosing values
from the data vector (NEW_PHI_RESULT) for matches, 0 (ZERO_VEC)
otherwise. Only one value should match, resulting in a vector
(VEC_COND) with one data value and the rest zeros.
In the case where the loop never made any matches, every index will
match, resulting in a vector with all data values (which will all be
the default value). */
/* Compare the max index vector to the vector of found indexes to find
the position of the max value. */
tree vec_compare = make_ssa_name (index_vec_cmp_type);
gimple *vec_compare_stmt = gimple_build_assign (vec_compare, EQ_EXPR,
induction_index,
max_index_vec);
gsi_insert_before (&exit_gsi, vec_compare_stmt, GSI_SAME_STMT);
/* Use the compare to choose either values from the data vector or
zero. */
tree vec_cond = make_ssa_name (vectype);
gimple *vec_cond_stmt = gimple_build_assign (vec_cond, VEC_COND_EXPR,
vec_compare, new_phi_result,
zero_vec);
gsi_insert_before (&exit_gsi, vec_cond_stmt, GSI_SAME_STMT);
/* Finally we need to extract the data value from the vector (VEC_COND)
into a scalar (MATCHED_DATA_REDUC). Logically we want to do an OR
reduction, but because that doesn't exist, we use a MAX reduction
instead. The data value might be signed or a float, so we first cast
it to an unsigned integer type.
In the case where the loop never made any matches, the data values are
all identical, and so will reduce down correctly. */
/* Make the matched data values unsigned. */
tree vec_cond_cast = make_ssa_name (vectype_unsigned);
tree vec_cond_cast_rhs = build1 (VIEW_CONVERT_EXPR, vectype_unsigned,
vec_cond);
gimple *vec_cond_cast_stmt = gimple_build_assign (vec_cond_cast,
VIEW_CONVERT_EXPR,
vec_cond_cast_rhs);
gsi_insert_before (&exit_gsi, vec_cond_cast_stmt, GSI_SAME_STMT);
/* Reduce down to a scalar value. */
tree data_reduc = make_ssa_name (scalar_type_unsigned);
gcall *data_reduc_stmt = gimple_build_call_internal (IFN_REDUC_MAX,
1, vec_cond_cast);
gimple_call_set_lhs (data_reduc_stmt, data_reduc);
gsi_insert_before (&exit_gsi, data_reduc_stmt, GSI_SAME_STMT);
/* Convert the reduced value back to the result type and set as the
result. */
gimple_seq stmts = NULL;
new_temp = gimple_build (&stmts, VIEW_CONVERT_EXPR, scalar_type,
data_reduc);
gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
scalar_results.safe_push (new_temp);
}
else if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) == COND_REDUCTION
&& reduc_fn == IFN_LAST)
{
/* Condition reduction without supported IFN_REDUC_MAX. Generate
idx = 0;
idx_val = induction_index[0];
val = data_reduc[0];
for (idx = 0, val = init, i = 0; i < nelts; ++i)
if (induction_index[i] > idx_val)
val = data_reduc[i], idx_val = induction_index[i];
return val; */
tree data_eltype = TREE_TYPE (TREE_TYPE (new_phi_result));
tree idx_eltype = TREE_TYPE (TREE_TYPE (induction_index));
unsigned HOST_WIDE_INT el_size = tree_to_uhwi (TYPE_SIZE (idx_eltype));
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (TREE_TYPE (induction_index));
/* Enforced by vectorizable_reduction, which ensures we have target
support before allowing a conditional reduction on variable-length
vectors. */
unsigned HOST_WIDE_INT v_size = el_size * nunits.to_constant ();
tree idx_val = NULL_TREE, val = NULL_TREE;
for (unsigned HOST_WIDE_INT off = 0; off < v_size; off += el_size)
{
tree old_idx_val = idx_val;
tree old_val = val;
idx_val = make_ssa_name (idx_eltype);
epilog_stmt = gimple_build_assign (idx_val, BIT_FIELD_REF,
build3 (BIT_FIELD_REF, idx_eltype,
induction_index,
bitsize_int (el_size),
bitsize_int (off)));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
val = make_ssa_name (data_eltype);
epilog_stmt = gimple_build_assign (val, BIT_FIELD_REF,
build3 (BIT_FIELD_REF,
data_eltype,
new_phi_result,
bitsize_int (el_size),
bitsize_int (off)));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
if (off != 0)
{
tree new_idx_val = idx_val;
tree new_val = val;
if (off != v_size - el_size)
{
new_idx_val = make_ssa_name (idx_eltype);
epilog_stmt = gimple_build_assign (new_idx_val,
MAX_EXPR, idx_val,
old_idx_val);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
}
new_val = make_ssa_name (data_eltype);
epilog_stmt = gimple_build_assign (new_val,
COND_EXPR,
build2 (GT_EXPR,
boolean_type_node,
idx_val,
old_idx_val),
val, old_val);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
idx_val = new_idx_val;
val = new_val;
}
}
/* Convert the reduced value back to the result type and set as the
result. */
gimple_seq stmts = NULL;
val = gimple_convert (&stmts, scalar_type, val);
gsi_insert_seq_before (&exit_gsi, stmts, GSI_SAME_STMT);
scalar_results.safe_push (val);
}
/* 2.3 Create the reduction code, using one of the three schemes described
above. In SLP we simply need to extract all the elements from the
vector (without reducing them), so we use scalar shifts. */
else if (reduc_fn != IFN_LAST && !slp_reduc)
{
tree tmp;
tree vec_elem_type;
/* Case 1: Create:
v_out2 = reduc_expr <v_out1> */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using direct vector reduction.\n");
vec_elem_type = TREE_TYPE (TREE_TYPE (new_phi_result));
if (!useless_type_conversion_p (scalar_type, vec_elem_type))
{
tree tmp_dest
= vect_create_destination_var (scalar_dest, vec_elem_type);
epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
new_phi_result);
gimple_set_lhs (epilog_stmt, tmp_dest);
new_temp = make_ssa_name (tmp_dest, epilog_stmt);
gimple_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
epilog_stmt = gimple_build_assign (new_scalar_dest, NOP_EXPR,
new_temp);
}
else
{
epilog_stmt = gimple_build_call_internal (reduc_fn, 1,
new_phi_result);
gimple_set_lhs (epilog_stmt, new_scalar_dest);
}
new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== INTEGER_INDUC_COND_REDUCTION)
&& !operand_equal_p (initial_def, induc_val, 0))
{
/* Earlier we set the initial value to be a vector of induc_val
values. Check the result, and if it is induc_val then replace it
with the original initial value, unless induc_val is already the
same as initial_def. */
tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
induc_val);
tmp = make_ssa_name (new_scalar_dest);
epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
initial_def, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
new_temp = tmp;
}
scalar_results.safe_push (new_temp);
}
else if (direct_slp_reduc)
{
/* Here we create one vector for each of the GROUP_SIZE results,
with the elements for other SLP statements replaced with the
neutral value. We can then do a normal reduction on each vector. */
/* Enforced by vectorizable_reduction. */
gcc_assert (new_phis.length () == 1);
gcc_assert (pow2p_hwi (group_size));
slp_tree orig_phis_slp_node = slp_node_instance->reduc_phis;
vec<gimple *> orig_phis = SLP_TREE_SCALAR_STMTS (orig_phis_slp_node);
gimple_seq seq = NULL;
/* Build a vector {0, 1, 2, ...}, with the same number of elements
and the same element size as VECTYPE. */
tree index = build_index_vector (vectype, 0, 1);
tree index_type = TREE_TYPE (index);
tree index_elt_type = TREE_TYPE (index_type);
tree mask_type = build_same_sized_truth_vector_type (index_type);
/* Create a vector that, for each element, identifies which of
the GROUP_SIZE results should use it. */
tree index_mask = build_int_cst (index_elt_type, group_size - 1);
index = gimple_build (&seq, BIT_AND_EXPR, index_type, index,
build_vector_from_val (index_type, index_mask));
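/* For example (hypothetical shapes): with GROUP_SIZE == 2 and an
8-element vector, INDEX becomes {0, 1, 0, 1, 0, 1, 0, 1}, so
iteration I of the loop below selects exactly the lanes with
INDEX[j] == I. */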
/* Get a neutral vector value. This is simply a splat of the neutral
scalar value if we have one, otherwise the initial scalar value
is itself a neutral value. */
tree vector_identity = NULL_TREE;
if (neutral_op)
vector_identity = gimple_build_vector_from_val (&seq, vectype,
neutral_op);
for (unsigned int i = 0; i < group_size; ++i)
{
/* If there's no universal neutral value, we can use the
initial scalar value from the original PHI. This is used
for MIN and MAX reduction, for example. */
if (!neutral_op)
{
tree scalar_value
= PHI_ARG_DEF_FROM_EDGE (orig_phis[i],
loop_preheader_edge (loop));
vector_identity = gimple_build_vector_from_val (&seq, vectype,
scalar_value);
}
/* Calculate the equivalent of:
sel[j] = (index[j] == i);
which selects the elements of NEW_PHI_RESULT that should
be included in the result. */
tree compare_val = build_int_cst (index_elt_type, i);
compare_val = build_vector_from_val (index_type, compare_val);
tree sel = gimple_build (&seq, EQ_EXPR, mask_type,
index, compare_val);
/* Calculate the equivalent of:
vec = sel ? new_phi_result : vector_identity;
VEC is now suitable for a full vector reduction. */
tree vec = gimple_build (&seq, VEC_COND_EXPR, vectype,
sel, new_phi_result, vector_identity);
/* Do the reduction and convert it to the appropriate type. */
gcall *call = gimple_build_call_internal (reduc_fn, 1, vec);
tree scalar = make_ssa_name (TREE_TYPE (vectype));
gimple_call_set_lhs (call, scalar);
gimple_seq_add_stmt (&seq, call);
scalar = gimple_convert (&seq, scalar_type, scalar);
scalar_results.safe_push (scalar);
}
gsi_insert_seq_before (&exit_gsi, seq, GSI_SAME_STMT);
}
else
{
bool reduce_with_shift;
tree vec_temp;
/* COND reductions all do the final reduction with MAX_EXPR
or MIN_EXPR. */
if (code == COND_EXPR)
{
if (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== INTEGER_INDUC_COND_REDUCTION)
code = induc_code;
else
code = MAX_EXPR;
}
/* See if the target wants to do the final (shift) reduction
in a vector mode of smaller size and first reduce upper/lower
halves against each other. */
enum machine_mode mode1 = mode;
tree vectype1 = vectype;
unsigned sz = tree_to_uhwi (TYPE_SIZE_UNIT (vectype));
unsigned sz1 = sz;
if (!slp_reduc
&& (mode1 = targetm.vectorize.split_reduction (mode)) != mode)
sz1 = GET_MODE_SIZE (mode1).to_constant ();
vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz1);
reduce_with_shift = have_whole_vector_shift (mode1);
if (!VECTOR_MODE_P (mode1))
reduce_with_shift = false;
else
{
optab optab = optab_for_tree_code (code, vectype1, optab_default);
if (optab_handler (optab, mode1) == CODE_FOR_nothing)
reduce_with_shift = false;
}
/* First reduce the vector to the size we should do the shift
reduction on, by repeatedly combining upper and lower halves. */
new_temp = new_phi_result;
while (sz > sz1)
{
gcc_assert (!slp_reduc);
sz /= 2;
vectype1 = get_vectype_for_scalar_type_and_size (scalar_type, sz);
/* The target has to make sure we support lowpart/highpart
extraction, either via direct vector extract or through
integer mode punning. */
tree dst1, dst2;
if (convert_optab_handler (vec_extract_optab,
TYPE_MODE (TREE_TYPE (new_temp)),
TYPE_MODE (vectype1))
!= CODE_FOR_nothing)
{
/* Extract sub-vectors directly once vec_extract becomes
a conversion optab. */
dst1 = make_ssa_name (vectype1);
epilog_stmt
= gimple_build_assign (dst1, BIT_FIELD_REF,
build3 (BIT_FIELD_REF, vectype1,
new_temp, TYPE_SIZE (vectype1),
bitsize_int (0)));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
dst2 = make_ssa_name (vectype1);
epilog_stmt
= gimple_build_assign (dst2, BIT_FIELD_REF,
build3 (BIT_FIELD_REF, vectype1,
new_temp, TYPE_SIZE (vectype1),
bitsize_int (sz * BITS_PER_UNIT)));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
}
else
{
/* Extract via punning to appropriately sized integer mode
vector. */
tree eltype = build_nonstandard_integer_type (sz * BITS_PER_UNIT,
1);
tree etype = build_vector_type (eltype, 2);
gcc_assert (convert_optab_handler (vec_extract_optab,
TYPE_MODE (etype),
TYPE_MODE (eltype))
!= CODE_FOR_nothing);
tree tem = make_ssa_name (etype);
epilog_stmt = gimple_build_assign (tem, VIEW_CONVERT_EXPR,
build1 (VIEW_CONVERT_EXPR,
etype, new_temp));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
new_temp = tem;
tem = make_ssa_name (eltype);
epilog_stmt
= gimple_build_assign (tem, BIT_FIELD_REF,
build3 (BIT_FIELD_REF, eltype,
new_temp, TYPE_SIZE (eltype),
bitsize_int (0)));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
dst1 = make_ssa_name (vectype1);
epilog_stmt = gimple_build_assign (dst1, VIEW_CONVERT_EXPR,
build1 (VIEW_CONVERT_EXPR,
vectype1, tem));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
tem = make_ssa_name (eltype);
epilog_stmt
= gimple_build_assign (tem, BIT_FIELD_REF,
build3 (BIT_FIELD_REF, eltype,
new_temp, TYPE_SIZE (eltype),
bitsize_int (sz * BITS_PER_UNIT)));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
dst2 = make_ssa_name (vectype1);
epilog_stmt = gimple_build_assign (dst2, VIEW_CONVERT_EXPR,
build1 (VIEW_CONVERT_EXPR,
vectype1, tem));
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
}
new_temp = make_ssa_name (vectype1);
epilog_stmt = gimple_build_assign (new_temp, code, dst1, dst2);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
}
if (reduce_with_shift && !slp_reduc)
{
int element_bitsize = tree_to_uhwi (bitsize);
/* Enforced by vectorizable_reduction, which disallows SLP reductions
for variable-length vectors and also requires direct target support
for loop reductions. */
int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
int nelements = vec_size_in_bits / element_bitsize;
vec_perm_builder sel;
vec_perm_indices indices;
int elt_offset;
tree zero_vec = build_zero_cst (vectype1);
/* Case 2: Create:
for (offset = nelements/2; offset >= 1; offset/=2)
{
Create: va' = vec_shift <va, offset>
Create: va = vop <va, va'>
} */
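/* An illustrative trace with four elements and a PLUS reduction:
from {a, b, c, d}, the offset-2 step shifts in zeros and adds to
give {a+c, b+d, c, d}; the offset-1 step then leaves a+b+c+d in
element 0, ready for extraction. */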
tree rhs;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using vector shifts\n");
mode1 = TYPE_MODE (vectype1);
vec_dest = vect_create_destination_var (scalar_dest, vectype1);
for (elt_offset = nelements / 2;
elt_offset >= 1;
elt_offset /= 2)
{
calc_vec_perm_mask_for_shift (elt_offset, nelements, &sel);
indices.new_vector (sel, 2, nelements);
tree mask = vect_gen_perm_mask_any (vectype1, indices);
epilog_stmt = gimple_build_assign (vec_dest, VEC_PERM_EXPR,
new_temp, zero_vec, mask);
new_name = make_ssa_name (vec_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_name);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
epilog_stmt = gimple_build_assign (vec_dest, code, new_name,
new_temp);
new_temp = make_ssa_name (vec_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
}
/* 2.4 Extract the final scalar result. Create:
s_out3 = extract_field <v_out2, bitpos> */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"extract scalar result\n");
rhs = build3 (BIT_FIELD_REF, scalar_type, new_temp,
bitsize, bitsize_zero_node);
epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
scalar_results.safe_push (new_temp);
}
else
{
/* Case 3: Create:
s = extract_field <v_out2, 0>
for (offset = element_size;
offset < vector_size;
offset += element_size)
{
Create: s' = extract_field <v_out2, offset>
Create: s = op <s, s'> // For non SLP cases
} */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Reduce using scalar code.\n");
int vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype1));
int element_bitsize = tree_to_uhwi (bitsize);
FOR_EACH_VEC_ELT (new_phis, i, new_phi)
{
int bit_offset;
if (gimple_code (new_phi) == GIMPLE_PHI)
vec_temp = PHI_RESULT (new_phi);
else
vec_temp = gimple_assign_lhs (new_phi);
tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp, bitsize,
bitsize_zero_node);
epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
/* In SLP we don't need to apply the reduction operation, so we
just collect s' values in SCALAR_RESULTS. */
if (slp_reduc)
scalar_results.safe_push (new_temp);
for (bit_offset = element_bitsize;
bit_offset < vec_size_in_bits;
bit_offset += element_bitsize)
{
tree bitpos = bitsize_int (bit_offset);
tree rhs = build3 (BIT_FIELD_REF, scalar_type, vec_temp,
bitsize, bitpos);
epilog_stmt = gimple_build_assign (new_scalar_dest, rhs);
new_name = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_name);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
if (slp_reduc)
{
/* In SLP we don't need to apply the reduction operation,
so we just collect s' values in SCALAR_RESULTS. */
new_temp = new_name;
scalar_results.safe_push (new_name);
}
else
{
epilog_stmt = gimple_build_assign (new_scalar_dest, code,
new_name, new_temp);
new_temp = make_ssa_name (new_scalar_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
}
}
}
/* The only case where we need to reduce scalar results in SLP is
unrolling. If the size of SCALAR_RESULTS is greater than
GROUP_SIZE, we reduce them by combining elements modulo
GROUP_SIZE. */
if (slp_reduc)
{
tree res, first_res, new_res;
gimple *new_stmt;
/* Reduce multiple scalar results in case of SLP unrolling. */
for (j = group_size; scalar_results.iterate (j, &res);
j++)
{
first_res = scalar_results[j % group_size];
new_stmt = gimple_build_assign (new_scalar_dest, code,
first_res, res);
new_res = make_ssa_name (new_scalar_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, new_res);
gsi_insert_before (&exit_gsi, new_stmt, GSI_SAME_STMT);
scalar_results[j % group_size] = new_res;
}
}
else
/* Not SLP - we have one scalar to keep in SCALAR_RESULTS. */
scalar_results.safe_push (new_temp);
}
if ((STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== INTEGER_INDUC_COND_REDUCTION)
&& !operand_equal_p (initial_def, induc_val, 0))
{
/* Earlier we set the initial value to be a vector of induc_val
values. Check the result, and if it is induc_val then replace it
with the original initial value, unless induc_val is already the
same as initial_def. */
tree zcompare = build2 (EQ_EXPR, boolean_type_node, new_temp,
induc_val);
tree tmp = make_ssa_name (new_scalar_dest);
epilog_stmt = gimple_build_assign (tmp, COND_EXPR, zcompare,
initial_def, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
scalar_results[0] = tmp;
}
}
vect_finalize_reduction:
if (double_reduc)
loop = loop->inner;
/* 2.5 Adjust the final result by the initial value of the reduction
variable. (When such adjustment is not needed, then
'adjustment_def' is zero). For example, if code is PLUS we create:
new_temp = loop_exit_def + adjustment_def */
if (adjustment_def)
{
gcc_assert (!slp_reduc);
if (nested_in_vect_loop)
{
new_phi = new_phis[0];
gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) == VECTOR_TYPE);
expr = build2 (code, vectype, PHI_RESULT (new_phi), adjustment_def);
new_dest = vect_create_destination_var (scalar_dest, vectype);
}
else
{
new_temp = scalar_results[0];
gcc_assert (TREE_CODE (TREE_TYPE (adjustment_def)) != VECTOR_TYPE);
expr = build2 (code, scalar_type, new_temp, adjustment_def);
new_dest = vect_create_destination_var (scalar_dest, scalar_type);
}
epilog_stmt = gimple_build_assign (new_dest, expr);
new_temp = make_ssa_name (new_dest, epilog_stmt);
gimple_assign_set_lhs (epilog_stmt, new_temp);
gsi_insert_before (&exit_gsi, epilog_stmt, GSI_SAME_STMT);
if (nested_in_vect_loop)
{
set_vinfo_for_stmt (epilog_stmt,
new_stmt_vec_info (epilog_stmt, loop_vinfo));
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (epilog_stmt)) =
STMT_VINFO_RELATED_STMT (vinfo_for_stmt (new_phi));
if (!double_reduc)
scalar_results.quick_push (new_temp);
else
scalar_results[0] = new_temp;
}
else
scalar_results[0] = new_temp;
new_phis[0] = epilog_stmt;
}
/* 2.6 Handle the loop-exit phis. Replace the uses of scalar loop-exit
phis with new adjusted scalar results, i.e., replace use <s_out0>
with use <s_out4>.
Transform:
loop_exit:
s_out0 = phi <s_loop> # (scalar) EXIT_PHI
v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
v_out2 = reduce <v_out1>
s_out3 = extract_field <v_out2, 0>
s_out4 = adjust_result <s_out3>
use <s_out0>
use <s_out0>
into:
loop_exit:
s_out0 = phi <s_loop> # (scalar) EXIT_PHI
v_out1 = phi <VECT_DEF> # NEW_EXIT_PHI
v_out2 = reduce <v_out1>
s_out3 = extract_field <v_out2, 0>
s_out4 = adjust_result <s_out3>
use <s_out4>
use <s_out4> */
/* In an SLP reduction chain we reduce vector results into one vector if
necessary; hence we set GROUP_SIZE to 1 here. SCALAR_DEST is the LHS of
the last stmt in the reduction chain, since we are looking for the loop
exit phi node. */
if (GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
gimple *dest_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
/* Handle reduction patterns. */
if (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt)))
dest_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (dest_stmt));
scalar_dest = gimple_assign_lhs (dest_stmt);
group_size = 1;
}
/* In SLP we may have several statements in NEW_PHIS and REDUCTION_PHIS (in
case that GROUP_SIZE is greater than vectorization factor). Therefore, we
need to match SCALAR_RESULTS with corresponding statements. The first
(GROUP_SIZE / number of new vector stmts) scalar results correspond to
the first vector stmt, etc.
(RATIO is equal to (GROUP_SIZE / number of new vector stmts)). */
if (group_size > new_phis.length ())
{
ratio = group_size / new_phis.length ();
gcc_assert (!(group_size % new_phis.length ()));
}
else
ratio = 1;
for (k = 0; k < group_size; k++)
{
if (k % ratio == 0)
{
epilog_stmt = new_phis[k / ratio];
reduction_phi = reduction_phis[k / ratio];
if (double_reduc)
inner_phi = inner_phis[k / ratio];
}
if (slp_reduc)
{
gimple *current_stmt = SLP_TREE_SCALAR_STMTS (slp_node)[k];
orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (current_stmt));
/* SLP statements can't participate in patterns. */
gcc_assert (!orig_stmt);
scalar_dest = gimple_assign_lhs (current_stmt);
}
phis.create (3);
/* Find the loop-closed-use at the loop exit of the original scalar
result. (The reduction result is expected to have two immediate uses -
one at the latch block, and one at the loop exit). */
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p)))
&& !is_gimple_debug (USE_STMT (use_p)))
phis.safe_push (USE_STMT (use_p));
/* While we expect to have found an exit_phi because of loop-closed-ssa
form we can end up without one if the scalar cycle is dead. */
FOR_EACH_VEC_ELT (phis, i, exit_phi)
{
if (outer_loop)
{
stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
gphi *vect_phi;
/* FORNOW. Currently not supporting the case that an inner-loop
reduction is not used in the outer-loop (but only outside the
outer-loop), unless it is a double reduction. */
gcc_assert ((STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
&& !STMT_VINFO_LIVE_P (exit_phi_vinfo))
|| double_reduc);
if (double_reduc)
STMT_VINFO_VEC_STMT (exit_phi_vinfo) = inner_phi;
else
STMT_VINFO_VEC_STMT (exit_phi_vinfo) = epilog_stmt;
if (!double_reduc
|| STMT_VINFO_DEF_TYPE (exit_phi_vinfo)
!= vect_double_reduction_def)
continue;
/* Handle double reduction:
stmt1: s1 = phi <s0, s2> - double reduction phi (outer loop)
stmt2: s3 = phi <s1, s4> - (regular) reduc phi (inner loop)
stmt3: s4 = use (s3) - (regular) reduc stmt (inner loop)
stmt4: s2 = phi <s4> - double reduction stmt (outer loop)
At that point the regular reduction (stmt2 and stmt3) is
already vectorized, as well as the exit phi node, stmt4.
Here we vectorize the phi node of double reduction, stmt1, and
update all relevant statements. */
/* Go through all the uses of s2 to find double reduction phi
node, i.e., stmt1 above. */
orig_name = PHI_RESULT (exit_phi);
FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
{
stmt_vec_info use_stmt_vinfo;
stmt_vec_info new_phi_vinfo;
tree vect_phi_init, preheader_arg, vect_phi_res;
basic_block bb = gimple_bb (use_stmt);
gimple *use;
/* Check that USE_STMT is really double reduction phi
node. */
if (gimple_code (use_stmt) != GIMPLE_PHI
|| gimple_phi_num_args (use_stmt) != 2
|| bb->loop_father != outer_loop)
continue;
use_stmt_vinfo = vinfo_for_stmt (use_stmt);
if (!use_stmt_vinfo
|| STMT_VINFO_DEF_TYPE (use_stmt_vinfo)
!= vect_double_reduction_def)
continue;
/* Create vector phi node for double reduction:
vs1 = phi <vs0, vs2>
vs1 was created previously in this function by a call to
vect_get_vec_def_for_operand and is stored in
vec_initial_def;
vs2 is defined by INNER_PHI, the vectorized EXIT_PHI;
vs0 is created here. */
/* Create vector phi node. */
vect_phi = create_phi_node (vec_initial_def, bb);
new_phi_vinfo = new_stmt_vec_info (vect_phi,
loop_vec_info_for_loop (outer_loop));
set_vinfo_for_stmt (vect_phi, new_phi_vinfo);
/* Create vs0 - initial def of the double reduction phi. */
preheader_arg = PHI_ARG_DEF_FROM_EDGE (use_stmt,
loop_preheader_edge (outer_loop));
vect_phi_init = get_initial_def_for_reduction
(stmt, preheader_arg, NULL);
/* Update phi node arguments with vs0 and vs2. */
add_phi_arg (vect_phi, vect_phi_init,
loop_preheader_edge (outer_loop),
UNKNOWN_LOCATION);
add_phi_arg (vect_phi, PHI_RESULT (inner_phi),
loop_latch_edge (outer_loop), UNKNOWN_LOCATION);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"created double reduction phi node: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, vect_phi, 0);
}
vect_phi_res = PHI_RESULT (vect_phi);
/* Replace the use, i.e., set the correct vs1 in the regular
reduction phi node. FORNOW, NCOPIES is always 1, so the
loop is redundant. */
use = reduction_phi;
for (j = 0; j < ncopies; j++)
{
edge pr_edge = loop_preheader_edge (loop);
SET_PHI_ARG_DEF (use, pr_edge->dest_idx, vect_phi_res);
use = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use));
}
}
}
}
phis.release ();
if (nested_in_vect_loop)
{
if (double_reduc)
loop = outer_loop;
else
continue;
}
phis.create (3);
/* Find the loop-closed-use at the loop exit of the original scalar
result. (The reduction result is expected to have two immediate uses,
one at the latch block, and one at the loop exit). For double
reductions we are looking for exit phis of the outer loop. */
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, scalar_dest)
{
if (!flow_bb_inside_loop_p (loop, gimple_bb (USE_STMT (use_p))))
{
if (!is_gimple_debug (USE_STMT (use_p)))
phis.safe_push (USE_STMT (use_p));
}
else
{
if (double_reduc && gimple_code (USE_STMT (use_p)) == GIMPLE_PHI)
{
tree phi_res = PHI_RESULT (USE_STMT (use_p));
FOR_EACH_IMM_USE_FAST (phi_use_p, phi_imm_iter, phi_res)
{
if (!flow_bb_inside_loop_p (loop,
gimple_bb (USE_STMT (phi_use_p)))
&& !is_gimple_debug (USE_STMT (phi_use_p)))
phis.safe_push (USE_STMT (phi_use_p));
}
}
}
}
FOR_EACH_VEC_ELT (phis, i, exit_phi)
{
/* Replace the uses: */
orig_name = PHI_RESULT (exit_phi);
scalar_result = scalar_results[k];
FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, orig_name)
FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
SET_USE (use_p, scalar_result);
}
phis.release ();
}
}
/* Return a vector of type VECTYPE that is equal to the vector select
operation "MASK ? VEC : IDENTITY". Insert the select statements
before GSI. */
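/* For example (illustrative): for a fully-masked add reduction IDENTITY
   is a zero vector, so lanes disabled by MASK contribute the additive
   identity and leave the accumulator unchanged.  */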
static tree
merge_with_identity (gimple_stmt_iterator *gsi, tree mask, tree vectype,
tree vec, tree identity)
{
tree cond = make_temp_ssa_name (vectype, NULL, "cond");
gimple *new_stmt = gimple_build_assign (cond, VEC_COND_EXPR,
mask, vec, identity);
gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
return cond;
}
/* Successively apply CODE to each element of VECTOR_RHS, in left-to-right
order, starting with LHS. Insert the extraction statements before GSI and
associate the new scalar SSA names with variable SCALAR_DEST.
Return the SSA name for the result. */
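/* For example (illustrative): for a V4SF VECTOR_RHS this emits four
   BIT_FIELD_REF extractions and four scalar operations, computing
   lhs = (((lhs op v[0]) op v[1]) op v[2]) op v[3] in element order.  */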
static tree
vect_expand_fold_left (gimple_stmt_iterator *gsi, tree scalar_dest,
tree_code code, tree lhs, tree vector_rhs)
{
tree vectype = TREE_TYPE (vector_rhs);
tree scalar_type = TREE_TYPE (vectype);
tree bitsize = TYPE_SIZE (scalar_type);
unsigned HOST_WIDE_INT vec_size_in_bits = tree_to_uhwi (TYPE_SIZE (vectype));
unsigned HOST_WIDE_INT element_bitsize = tree_to_uhwi (bitsize);
for (unsigned HOST_WIDE_INT bit_offset = 0;
bit_offset < vec_size_in_bits;
bit_offset += element_bitsize)
{
tree bitpos = bitsize_int (bit_offset);
tree rhs = build3 (BIT_FIELD_REF, scalar_type, vector_rhs,
bitsize, bitpos);
gassign *stmt = gimple_build_assign (scalar_dest, rhs);
rhs = make_ssa_name (scalar_dest, stmt);
gimple_assign_set_lhs (stmt, rhs);
gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
stmt = gimple_build_assign (scalar_dest, code, lhs, rhs);
tree new_name = make_ssa_name (scalar_dest, stmt);
gimple_assign_set_lhs (stmt, new_name);
gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
lhs = new_name;
}
return lhs;
}
/* Perform an in-order reduction (FOLD_LEFT_REDUCTION). STMT is the
statement that sets the live-out value. REDUC_DEF_STMT is the phi
statement. CODE is the operation performed by STMT and OPS are
its scalar operands. REDUC_INDEX is the index of the operand in
OPS that is set by REDUC_DEF_STMT. REDUC_FN is the function that
implements in-order reduction, or IFN_LAST if we should open-code it.
VECTYPE_IN is the type of the vector input. MASKS specifies the masks
that should be used to control the operation in a fully-masked loop. */
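/* For example (illustrative): an in-order reduction of {v0, v1, v2, v3}
   starting from INIT computes (((INIT op v0) op v1) op v2) op v3,
   preserving the scalar evaluation order instead of reassociating as a
   tree reduction would.  */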
static bool
vectorize_fold_left_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
gimple **vec_stmt, slp_tree slp_node,
gimple *reduc_def_stmt,
tree_code code, internal_fn reduc_fn,
tree ops[3], tree vectype_in,
int reduc_index, vec_loop_masks *masks)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
gimple *new_stmt = NULL;
int ncopies;
if (slp_node)
ncopies = 1;
else
ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
gcc_assert (!nested_in_vect_loop_p (loop, stmt));
gcc_assert (ncopies == 1);
gcc_assert (TREE_CODE_LENGTH (code) == binary_op);
gcc_assert (reduc_index == (code == MINUS_EXPR ? 0 : 1));
gcc_assert (STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
== FOLD_LEFT_REDUCTION);
if (slp_node)
gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype_out),
TYPE_VECTOR_SUBPARTS (vectype_in)));
tree op0 = ops[1 - reduc_index];
int group_size = 1;
gimple *scalar_dest_def;
auto_vec<tree> vec_oprnds0;
if (slp_node)
{
vect_get_vec_defs (op0, NULL_TREE, stmt, &vec_oprnds0, NULL, slp_node);
group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
scalar_dest_def = SLP_TREE_SCALAR_STMTS (slp_node)[group_size - 1];
}
else
{
tree loop_vec_def0 = vect_get_vec_def_for_operand (op0, stmt);
vec_oprnds0.create (1);
vec_oprnds0.quick_push (loop_vec_def0);
scalar_dest_def = stmt;
}
tree scalar_dest = gimple_assign_lhs (scalar_dest_def);
tree scalar_type = TREE_TYPE (scalar_dest);
tree reduc_var = gimple_phi_result (reduc_def_stmt);
int vec_num = vec_oprnds0.length ();
gcc_assert (vec_num == 1 || slp_node);
tree vec_elem_type = TREE_TYPE (vectype_out);
gcc_checking_assert (useless_type_conversion_p (scalar_type, vec_elem_type));
tree vector_identity = NULL_TREE;
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
vector_identity = build_zero_cst (vectype_out);
tree scalar_dest_var = vect_create_destination_var (scalar_dest, NULL);
int i;
tree def0;
FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
{
tree mask = NULL_TREE;
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
mask = vect_get_loop_mask (gsi, masks, vec_num, vectype_in, i);
/* Handle MINUS by adding the negative. */
if (reduc_fn != IFN_LAST && code == MINUS_EXPR)
{
tree negated = make_ssa_name (vectype_out);
new_stmt = gimple_build_assign (negated, NEGATE_EXPR, def0);
gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
def0 = negated;
}
if (mask)
def0 = merge_with_identity (gsi, mask, vectype_out, def0,
vector_identity);
/* On the first iteration the input is simply the scalar phi
result, and for subsequent iterations it is the output of
the preceding operation. */
if (reduc_fn != IFN_LAST)
{
new_stmt = gimple_build_call_internal (reduc_fn, 2, reduc_var, def0);
/* For chained SLP reductions the output of the previous reduction
operation serves as the input of the next. For the final statement
the output cannot be a temporary - we reuse the original
scalar destination of the last statement. */
if (i != vec_num - 1)
{
gimple_set_lhs (new_stmt, scalar_dest_var);
reduc_var = make_ssa_name (scalar_dest_var, new_stmt);
gimple_set_lhs (new_stmt, reduc_var);
}
}
else
{
reduc_var = vect_expand_fold_left (gsi, scalar_dest_var, code,
reduc_var, def0);
new_stmt = SSA_NAME_DEF_STMT (reduc_var);
/* Remove the statement, so that we can use the same code paths
as for statements that we've just created. */
gimple_stmt_iterator tmp_gsi = gsi_for_stmt (new_stmt);
gsi_remove (&tmp_gsi, false);
}
if (i == vec_num - 1)
{
gimple_set_lhs (new_stmt, scalar_dest);
vect_finish_replace_stmt (scalar_dest_def, new_stmt);
}
else
vect_finish_stmt_generation (scalar_dest_def, new_stmt, gsi);
if (slp_node)
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
if (!slp_node)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
return true;
}
/* Function is_nonwrapping_integer_induction.
Check if STMT (which is part of loop LOOP) is an integer induction
whose value is guaranteed not to wrap. */
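/* For example (illustrative): a 32-bit induction with base 0 and step 4
   in a loop that executes at most 1 << 20 times reaches at most 1 << 22,
   which fits in the 32-bit type, so the induction cannot wrap.  */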
static bool
is_nonwrapping_integer_induction (gimple *stmt, struct loop *loop)
{
stmt_vec_info stmt_vinfo = vinfo_for_stmt (stmt);
tree base = STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (stmt_vinfo);
tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_vinfo);
tree lhs_type = TREE_TYPE (gimple_phi_result (stmt));
widest_int ni, max_loop_value, lhs_max;
bool overflow = false;
/* Make sure the loop is integer based. */
if (TREE_CODE (base) != INTEGER_CST
|| TREE_CODE (step) != INTEGER_CST)
return false;
/* Check that the max size of the loop will not wrap. */
if (TYPE_OVERFLOW_UNDEFINED (lhs_type))
return true;
if (! max_stmt_executions (loop, &ni))
return false;
max_loop_value = wi::mul (wi::to_widest (step), ni, TYPE_SIGN (lhs_type),
&overflow);
if (overflow)
return false;
max_loop_value = wi::add (wi::to_widest (base), max_loop_value,
TYPE_SIGN (lhs_type), &overflow);
if (overflow)
return false;
return (wi::min_precision (max_loop_value, TYPE_SIGN (lhs_type))
<= TYPE_PRECISION (lhs_type));
}
/* Function vectorizable_reduction.
Check if STMT performs a reduction operation that can be vectorized.
If VEC_STMT is also passed, vectorize the STMT: create a vectorized
stmt to replace it, put it in VEC_STMT, and insert it at GSI.
Return FALSE if not a vectorizable STMT, TRUE otherwise.
This function also handles reduction idioms (patterns) that have been
recognized in advance during vect_pattern_recog. In this case, STMT may be
of this form:
X = pattern_expr (arg0, arg1, ..., X)
and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
sequence that had been detected and replaced by the pattern-stmt (STMT).
This function also handles reduction of condition expressions, for example:
for (int i = 0; i < N; i++)
if (a[i] < value)
last = a[i];
This is handled by vectorizing the loop and creating an additional vector
containing the loop indexes for which "a[i] < value" was true. In the
function epilogue this is reduced to a single max value and then used to
index into the vector of results.
In some cases of reduction patterns, the type of the reduction variable X is
different than the type of the other arguments of STMT.
In such cases, the vectype that is used when transforming STMT into a vector
stmt is different than the vectype that is used to determine the
vectorization factor, because it consists of a different number of elements
than the actual number of elements that are being operated upon in parallel.
For example, consider an accumulation of shorts into an int accumulator.
On some targets it's possible to vectorize this pattern operating on 8
shorts at a time (hence, the vectype for purposes of determining the
vectorization factor should be V8HI); on the other hand, the vectype that
is used to create the vector form is actually V4SI (the type of the result).
Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
indicates what is the actual level of parallelism (V8HI in the example), so
that the right vectorization factor would be derived. This vectype
corresponds to the type of arguments to the reduction stmt, and should *NOT*
be used to create the vectorized stmt. The right vectype for the vectorized
stmt is obtained from the type of the result X:
get_vectype_for_scalar_type (TREE_TYPE (X))
This means that, contrary to "regular" reductions (or "regular" stmts in
general), the following equation:
STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (TREE_TYPE (X))
does *NOT* necessarily hold for reduction patterns. */
bool
vectorizable_reduction (gimple *stmt, gimple_stmt_iterator *gsi,
gimple **vec_stmt, slp_tree slp_node,
slp_instance slp_node_instance)
{
tree vec_dest;
tree scalar_dest;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree vectype_out = STMT_VINFO_VECTYPE (stmt_info);
tree vectype_in = NULL_TREE;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
enum tree_code code, orig_code;
internal_fn reduc_fn;
machine_mode vec_mode;
int op_type;
optab optab;
tree new_temp = NULL_TREE;
gimple *def_stmt;
enum vect_def_type dt, cond_reduc_dt = vect_unknown_def_type;
gimple *cond_reduc_def_stmt = NULL;
enum tree_code cond_reduc_op_code = ERROR_MARK;
tree scalar_type;
bool is_simple_use;
gimple *orig_stmt;
stmt_vec_info orig_stmt_info = NULL;
int i;
int ncopies;
int epilog_copies;
stmt_vec_info prev_stmt_info, prev_phi_info;
bool single_defuse_cycle = false;
gimple *new_stmt = NULL;
int j;
tree ops[3];
enum vect_def_type dts[3];
bool nested_cycle = false, found_nested_cycle_def = false;
bool double_reduc = false;
basic_block def_bb;
struct loop * def_stmt_loop, *outer_loop = NULL;
tree def_arg;
gimple *def_arg_stmt;
auto_vec<tree> vec_oprnds0;
auto_vec<tree> vec_oprnds1;
auto_vec<tree> vec_oprnds2;
auto_vec<tree> vect_defs;
auto_vec<gimple *> phis;
int vec_num;
tree def0, tem;
bool first_p = true;
tree cr_index_scalar_type = NULL_TREE, cr_index_vector_type = NULL_TREE;
tree cond_reduc_val = NULL_TREE;
/* Make sure it was already recognized as a reduction computation. */
if (STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_reduction_def
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (stmt)) != vect_nested_cycle)
return false;
if (nested_in_vect_loop_p (loop, stmt))
{
outer_loop = loop;
loop = loop->inner;
nested_cycle = true;
}
/* In case of a reduction chain we switch to the first stmt in the chain, but
we don't update STMT_INFO, since only the last stmt is marked as a reduction
and has reduction properties. */
if (GROUP_FIRST_ELEMENT (stmt_info)
&& GROUP_FIRST_ELEMENT (stmt_info) != stmt)
{
stmt = GROUP_FIRST_ELEMENT (stmt_info);
first_p = false;
}
if (gimple_code (stmt) == GIMPLE_PHI)
{
/* Analysis is fully done on the reduction stmt invocation. */
if (! vec_stmt)
{
if (slp_node)
slp_node_instance->reduc_phis = slp_node;
STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
return true;
}
if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
/* Leave the scalar phi in place. Note that checking
STMT_VINFO_VEC_REDUCTION_TYPE (as below) only works
for reductions involving a single statement. */
return true;
gimple *reduc_stmt = STMT_VINFO_REDUC_DEF (stmt_info);
if (STMT_VINFO_IN_PATTERN_P (vinfo_for_stmt (reduc_stmt)))
reduc_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (reduc_stmt));
if (STMT_VINFO_VEC_REDUCTION_TYPE (vinfo_for_stmt (reduc_stmt))
== EXTRACT_LAST_REDUCTION)
/* Leave the scalar phi in place. */
return true;
gcc_assert (is_gimple_assign (reduc_stmt));
for (unsigned k = 1; k < gimple_num_ops (reduc_stmt); ++k)
{
tree op = gimple_op (reduc_stmt, k);
if (op == gimple_phi_result (stmt))
continue;
if (k == 1
&& gimple_assign_rhs_code (reduc_stmt) == COND_EXPR)
continue;
if (!vectype_in
|| (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
< GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (op)))))
vectype_in = get_vectype_for_scalar_type (TREE_TYPE (op));
break;
}
gcc_assert (vectype_in);
if (slp_node)
ncopies = 1;
else
ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
use_operand_p use_p;
gimple *use_stmt;
if (ncopies > 1
&& (STMT_VINFO_RELEVANT (vinfo_for_stmt (reduc_stmt))
<= vect_used_only_live)
&& single_imm_use (gimple_phi_result (stmt), &use_p, &use_stmt)
&& (use_stmt == reduc_stmt
|| (STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt))
== reduc_stmt)))
single_defuse_cycle = true;
/* Create the destination vector */
scalar_dest = gimple_assign_lhs (reduc_stmt);
vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
if (slp_node)
/* The size vect_schedule_slp_instance computes is off for us. */
vec_num = vect_get_num_vectors
(LOOP_VINFO_VECT_FACTOR (loop_vinfo)
* SLP_TREE_SCALAR_STMTS (slp_node).length (),
vectype_in);
else
vec_num = 1;
/* Generate the reduction PHIs upfront. */
prev_phi_info = NULL;
for (j = 0; j < ncopies; j++)
{
if (j == 0 || !single_defuse_cycle)
{
for (i = 0; i < vec_num; i++)
{
/* Create the reduction-phi that defines the reduction
operand. */
gimple *new_phi = create_phi_node (vec_dest, loop->header);
set_vinfo_for_stmt (new_phi,
new_stmt_vec_info (new_phi, loop_vinfo));
if (slp_node)
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_phi);
else
{
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_phi;
else
STMT_VINFO_RELATED_STMT (prev_phi_info) = new_phi;
prev_phi_info = vinfo_for_stmt (new_phi);
}
}
}
}
return true;
}
/* 1. Is vectorizable reduction? */
/* Not supportable if the reduction variable is used in the loop, unless
it's a reduction chain. */
if (STMT_VINFO_RELEVANT (stmt_info) > vect_used_in_outer
&& !GROUP_FIRST_ELEMENT (stmt_info))
return false;
/* Reductions that are not used even in an enclosing outer-loop
are expected to be "live" (used out of the loop). */
if (STMT_VINFO_RELEVANT (stmt_info) == vect_unused_in_scope
&& !STMT_VINFO_LIVE_P (stmt_info))
return false;
/* 2. Has this been recognized as a reduction pattern?
Check if STMT represents a pattern that has been recognized
in earlier analysis stages. For stmts that represent a pattern,
the STMT_VINFO_RELATED_STMT field records the last stmt in
the original sequence that constitutes the pattern. */
orig_stmt = STMT_VINFO_RELATED_STMT (vinfo_for_stmt (stmt));
if (orig_stmt)
{
orig_stmt_info = vinfo_for_stmt (orig_stmt);
gcc_assert (STMT_VINFO_IN_PATTERN_P (orig_stmt_info));
gcc_assert (!STMT_VINFO_IN_PATTERN_P (stmt_info));
}
/* 3. Check the operands of the operation. The first operands are defined
inside the loop body. The last operand is the reduction variable,
which is defined by the loop-header-phi. */
gcc_assert (is_gimple_assign (stmt));
/* Flatten RHS. */
switch (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)))
{
case GIMPLE_BINARY_RHS:
code = gimple_assign_rhs_code (stmt);
op_type = TREE_CODE_LENGTH (code);
gcc_assert (op_type == binary_op);
ops[0] = gimple_assign_rhs1 (stmt);
ops[1] = gimple_assign_rhs2 (stmt);
break;
case GIMPLE_TERNARY_RHS:
code = gimple_assign_rhs_code (stmt);
op_type = TREE_CODE_LENGTH (code);
gcc_assert (op_type == ternary_op);
ops[0] = gimple_assign_rhs1 (stmt);
ops[1] = gimple_assign_rhs2 (stmt);
ops[2] = gimple_assign_rhs3 (stmt);
break;
case GIMPLE_UNARY_RHS:
return false;
default:
gcc_unreachable ();
}
if (code == COND_EXPR && slp_node)
return false;
scalar_dest = gimple_assign_lhs (stmt);
scalar_type = TREE_TYPE (scalar_dest);
if (!POINTER_TYPE_P (scalar_type) && !INTEGRAL_TYPE_P (scalar_type)
&& !SCALAR_FLOAT_TYPE_P (scalar_type))
return false;
/* Do not try to vectorize bit-precision reductions. */
if (!type_has_mode_precision_p (scalar_type))
return false;
/* All uses but the last are expected to be defined in the loop.
The last use is the reduction variable. In case of nested cycle this
assumption is not true: we use reduc_index to record the index of the
reduction variable. */
gimple *reduc_def_stmt = NULL;
int reduc_index = -1;
for (i = 0; i < op_type; i++)
{
/* The condition of COND_EXPR is checked in vectorizable_condition(). */
if (i == 0 && code == COND_EXPR)
continue;
is_simple_use = vect_is_simple_use (ops[i], loop_vinfo,
&def_stmt, &dts[i], &tem);
dt = dts[i];
gcc_assert (is_simple_use);
if (dt == vect_reduction_def)
{
reduc_def_stmt = def_stmt;
reduc_index = i;
continue;
}
else if (tem)
{
/* To properly compute ncopies we are interested in the widest
input type in case we're looking at a widening accumulation. */
if (!vectype_in
|| (GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (vectype_in)))
< GET_MODE_SIZE (SCALAR_TYPE_MODE (TREE_TYPE (tem)))))
vectype_in = tem;
}
if (dt != vect_internal_def
&& dt != vect_external_def
&& dt != vect_constant_def
&& dt != vect_induction_def
&& !(dt == vect_nested_cycle && nested_cycle))
return false;
if (dt == vect_nested_cycle)
{
found_nested_cycle_def = true;
reduc_def_stmt = def_stmt;
reduc_index = i;
}
if (i == 1 && code == COND_EXPR)
{
/* Record how value of COND_EXPR is defined. */
if (dt == vect_constant_def)
{
cond_reduc_dt = dt;
cond_reduc_val = ops[i];
}
if (dt == vect_induction_def
&& def_stmt != NULL
&& is_nonwrapping_integer_induction (def_stmt, loop))
{
cond_reduc_dt = dt;
cond_reduc_def_stmt = def_stmt;
}
}
}
if (!vectype_in)
vectype_in = vectype_out;
/* When vectorizing a reduction chain w/o SLP the reduction PHI is not
directly used in stmt. */
if (reduc_index == -1)
{
if (STMT_VINFO_REDUC_TYPE (stmt_info) == FOLD_LEFT_REDUCTION)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"in-order reduction chain without SLP.\n");
return false;
}
if (orig_stmt)
reduc_def_stmt = STMT_VINFO_REDUC_DEF (orig_stmt_info);
else
reduc_def_stmt = STMT_VINFO_REDUC_DEF (stmt_info);
}
if (! reduc_def_stmt || gimple_code (reduc_def_stmt) != GIMPLE_PHI)
return false;
if (!(reduc_index == -1
|| dts[reduc_index] == vect_reduction_def
|| dts[reduc_index] == vect_nested_cycle
|| ((dts[reduc_index] == vect_internal_def
|| dts[reduc_index] == vect_external_def
|| dts[reduc_index] == vect_constant_def
|| dts[reduc_index] == vect_induction_def)
&& nested_cycle && found_nested_cycle_def)))
{
/* For pattern recognized stmts, orig_stmt might be a reduction,
but some helper statements for the pattern might not, or
might be COND_EXPRs with reduction uses in the condition. */
gcc_assert (orig_stmt);
return false;
}
stmt_vec_info reduc_def_info = vinfo_for_stmt (reduc_def_stmt);
enum vect_reduction_type v_reduc_type
= STMT_VINFO_REDUC_TYPE (reduc_def_info);
gimple *tmp = STMT_VINFO_REDUC_DEF (reduc_def_info);
STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = v_reduc_type;
/* If we have a condition reduction, see if we can simplify it further. */
if (v_reduc_type == COND_REDUCTION)
{
/* TODO: We can't yet handle reduction chains, since we need to treat
each COND_EXPR in the chain specially, not just the last one.
E.g. for:
x_1 = PHI <x_3, ...>
x_2 = a_2 ? ... : x_1;
x_3 = a_3 ? ... : x_2;
we're interested in the last element in x_3 for which a_2 || a_3
is true, whereas the current reduction chain handling would
vectorize x_2 as a normal VEC_COND_EXPR and only treat x_3
as a reduction operation. */
if (reduc_index == -1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"conditional reduction chains not supported\n");
return false;
}
/* vect_is_simple_reduction ensured that operand 2 is the
loop-carried operand. */
gcc_assert (reduc_index == 2);
/* Loop peeling modifies initial value of reduction PHI, which
makes the reduction stmt to be transformed different to the
original stmt analyzed. We need to record reduction code for
CONST_COND_REDUCTION type reduction at analyzing stage, thus
it can be used directly at transform stage. */
if (STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MAX_EXPR
|| STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info) == MIN_EXPR)
{
/* Also set the reduction type to CONST_COND_REDUCTION. */
gcc_assert (cond_reduc_dt == vect_constant_def);
STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = CONST_COND_REDUCTION;
}
else if (direct_internal_fn_supported_p (IFN_FOLD_EXTRACT_LAST,
vectype_in, OPTIMIZE_FOR_SPEED))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"optimizing condition reduction with"
" FOLD_EXTRACT_LAST.\n");
STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info) = EXTRACT_LAST_REDUCTION;
}
else if (cond_reduc_dt == vect_induction_def)
{
stmt_vec_info cond_stmt_vinfo = vinfo_for_stmt (cond_reduc_def_stmt);
tree base
= STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED (cond_stmt_vinfo);
tree step = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (cond_stmt_vinfo);
gcc_assert (TREE_CODE (base) == INTEGER_CST
&& TREE_CODE (step) == INTEGER_CST);
cond_reduc_val = NULL_TREE;
/* Find a suitable value: below BASE for MAX_EXPR, above BASE for
MIN_EXPR; for now punt if BASE is the minimum value of the type for
MAX_EXPR, or the maximum value of the type for MIN_EXPR. */
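/* For example (illustrative): with base == 5 and step == -1 the induction
   only takes values <= 5, so MIN_EXPR with cond_reduc_val == 6 guarantees
   that lanes whose condition never held are not selected.  */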
if (tree_int_cst_sgn (step) == -1)
{
cond_reduc_op_code = MIN_EXPR;
if (tree_int_cst_sgn (base) == -1)
cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
else if (tree_int_cst_lt (base,
TYPE_MAX_VALUE (TREE_TYPE (base))))
cond_reduc_val
= int_const_binop (PLUS_EXPR, base, integer_one_node);
}
else
{
cond_reduc_op_code = MAX_EXPR;
if (tree_int_cst_sgn (base) == 1)
cond_reduc_val = build_int_cst (TREE_TYPE (base), 0);
else if (tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (base)),
base))
cond_reduc_val
= int_const_binop (MINUS_EXPR, base, integer_one_node);
}
if (cond_reduc_val)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"condition expression based on "
"integer induction.\n");
STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
= INTEGER_INDUC_COND_REDUCTION;
}
}
else if (cond_reduc_dt == vect_constant_def)
{
enum vect_def_type cond_initial_dt;
gimple *def_stmt = SSA_NAME_DEF_STMT (ops[reduc_index]);
tree cond_initial_val
= PHI_ARG_DEF_FROM_EDGE (def_stmt, loop_preheader_edge (loop));
gcc_assert (cond_reduc_val != NULL_TREE);
vect_is_simple_use (cond_initial_val, loop_vinfo,
&def_stmt, &cond_initial_dt);
if (cond_initial_dt == vect_constant_def
&& types_compatible_p (TREE_TYPE (cond_initial_val),
TREE_TYPE (cond_reduc_val)))
{
tree e = fold_binary (LE_EXPR, boolean_type_node,
cond_initial_val, cond_reduc_val);
if (e && (integer_onep (e) || integer_zerop (e)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"condition expression based on "
"compile time constant.\n");
/* Record reduction code at analysis stage. */
STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info)
= integer_onep (e) ? MAX_EXPR : MIN_EXPR;
STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info)
= CONST_COND_REDUCTION;
}
}
}
}
if (orig_stmt)
gcc_assert (tmp == orig_stmt
|| GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == orig_stmt);
else
/* We changed STMT to be the first stmt in the reduction chain, hence we
check that in this case the first element in the chain is STMT. */
gcc_assert (stmt == tmp
|| GROUP_FIRST_ELEMENT (vinfo_for_stmt (tmp)) == stmt);
if (STMT_VINFO_LIVE_P (vinfo_for_stmt (reduc_def_stmt)))
return false;
if (slp_node)
ncopies = 1;
else
ncopies = vect_get_num_copies (loop_vinfo, vectype_in);
gcc_assert (ncopies >= 1);
vec_mode = TYPE_MODE (vectype_in);
poly_uint64 nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
if (code == COND_EXPR)
{
/* Only call during the analysis stage, otherwise we'll lose
STMT_VINFO_TYPE. */
if (!vec_stmt && !vectorizable_condition (stmt, gsi, NULL,
ops[reduc_index], 0, NULL))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported condition in reduction\n");
return false;
}
}
else
{
/* 4. Supportable by target? */
if (code == LSHIFT_EXPR || code == RSHIFT_EXPR
|| code == LROTATE_EXPR || code == RROTATE_EXPR)
{
/* Shifts and rotates are only supported by vectorizable_shifts,
not vectorizable_reduction. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported shift or rotation.\n");
return false;
}
/* 4.1. check support for the operation in the loop */
optab = optab_for_tree_code (code, vectype_in, optab_default);
if (!optab)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no optab.\n");
return false;
}
if (optab_handler (optab, vec_mode) == CODE_FOR_nothing)
{
if (dump_enabled_p ())
dump_printf (MSG_NOTE, "op not supported by target.\n");
if (maybe_ne (GET_MODE_SIZE (vec_mode), UNITS_PER_WORD)
|| !vect_worthwhile_without_simd_p (loop_vinfo, code))
return false;
if (dump_enabled_p ())
dump_printf (MSG_NOTE, "proceeding using word mode.\n");
}
/* Worthwhile without SIMD support? */
if (!VECTOR_MODE_P (TYPE_MODE (vectype_in))
&& !vect_worthwhile_without_simd_p (loop_vinfo, code))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"not worthwhile without SIMD support.\n");
return false;
}
}
/* 4.2. Check support for the epilog operation.
If STMT represents a reduction pattern, then the type of the
reduction variable may be different than the type of the rest
of the arguments. For example, consider the case of accumulation
of shorts into an int accumulator; The original code:
S1: int_a = (int) short_a;
orig_stmt-> S2: int_acc = plus <int_a ,int_acc>;
was replaced with:
STMT: int_acc = widen_sum <short_a, int_acc>
This means that:
1. The tree-code that is used to create the vector operation in the
epilog code (that reduces the partial results) is not the
tree-code of STMT, but is rather the tree-code of the original
stmt from the pattern that STMT is replacing. I.e, in the example
above we want to use 'widen_sum' in the loop, but 'plus' in the
epilog.
2. The type (mode) we use to check available target support
for the vector operation to be created in the *epilog*, is
determined by the type of the reduction variable (in the example
above we'd check this: optab_handler (plus_optab, vect_int_mode])).
However the type (mode) we use to check available target support
for the vector operation to be created *inside the loop*, is
determined by the type of the other arguments to STMT (in the
example we'd check this: optab_handler (widen_sum_optab,
vect_short_mode)).
This is contrary to "regular" reductions, in which the types of all
the arguments are the same as the type of the reduction variable.
For "regular" reductions we can therefore use the same vector type
(and also the same tree-code) when generating the epilog code and
when generating the code inside the loop. */
vect_reduction_type reduction_type
= STMT_VINFO_VEC_REDUCTION_TYPE (stmt_info);
if (orig_stmt
&& (reduction_type == TREE_CODE_REDUCTION
|| reduction_type == FOLD_LEFT_REDUCTION))
{
/* This is a reduction pattern: get the vectype from the type of the
reduction variable, and get the tree-code from orig_stmt. */
orig_code = gimple_assign_rhs_code (orig_stmt);
gcc_assert (vectype_out);
vec_mode = TYPE_MODE (vectype_out);
}
else
{
/* Regular reduction: the same vectype and tree-code as used for
the vector code inside the loop can also be used for the epilog code. */
orig_code = code;
if (code == MINUS_EXPR)
orig_code = PLUS_EXPR;
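/* (The epilog combines partial accumulators, and for MINUS_EXPR those
   partial results must be added together; compare
   vectorize_fold_left_reduction above, which handles MINUS by negating
   the input instead.)  */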
/* For simple condition reductions, replace with the actual expression
we want to base our reduction around. */
if (reduction_type == CONST_COND_REDUCTION)
{
orig_code = STMT_VINFO_VEC_CONST_COND_REDUC_CODE (stmt_info);
gcc_assert (orig_code == MAX_EXPR || orig_code == MIN_EXPR);
}
else if (reduction_type == INTEGER_INDUC_COND_REDUCTION)
orig_code = cond_reduc_op_code;
}
if (nested_cycle)
{
def_bb = gimple_bb (reduc_def_stmt);
def_stmt_loop = def_bb->loop_father;
def_arg = PHI_ARG_DEF_FROM_EDGE (reduc_def_stmt,
loop_preheader_edge (def_stmt_loop));
if (TREE_CODE (def_arg) == SSA_NAME
&& (def_arg_stmt = SSA_NAME_DEF_STMT (def_arg))
&& gimple_code (def_arg_stmt) == GIMPLE_PHI
&& flow_bb_inside_loop_p (outer_loop, gimple_bb (def_arg_stmt))
&& vinfo_for_stmt (def_arg_stmt)
&& STMT_VINFO_DEF_TYPE (vinfo_for_stmt (def_arg_stmt))
== vect_double_reduction_def)
double_reduc = true;
}
reduc_fn = IFN_LAST;
if (reduction_type == TREE_CODE_REDUCTION
|| reduction_type == FOLD_LEFT_REDUCTION
|| reduction_type == INTEGER_INDUC_COND_REDUCTION
|| reduction_type == CONST_COND_REDUCTION)
{
if (reduction_type == FOLD_LEFT_REDUCTION
? fold_left_reduction_fn (orig_code, &reduc_fn)
: reduction_fn_for_scalar_code (orig_code, &reduc_fn))
{
if (reduc_fn != IFN_LAST
&& !direct_internal_fn_supported_p (reduc_fn, vectype_out,
OPTIMIZE_FOR_SPEED))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduc op not supported by target.\n");
reduc_fn = IFN_LAST;
}
}
else
{
if (!nested_cycle || double_reduc)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"no reduc code for scalar code.\n");
return false;
}
}
}
else if (reduction_type == COND_REDUCTION)
{
int scalar_precision
= GET_MODE_PRECISION (SCALAR_TYPE_MODE (scalar_type));
cr_index_scalar_type = make_unsigned_type (scalar_precision);
cr_index_vector_type = build_vector_type (cr_index_scalar_type,
nunits_out);
if (direct_internal_fn_supported_p (IFN_REDUC_MAX, cr_index_vector_type,
OPTIMIZE_FOR_SPEED))
reduc_fn = IFN_REDUC_MAX;
}
if (reduction_type != EXTRACT_LAST_REDUCTION
&& reduc_fn == IFN_LAST
&& !nunits_out.is_constant ())
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"missing target support for reduction on"
" variable-length vectors.\n");
return false;
}
if ((double_reduc || reduction_type != TREE_CODE_REDUCTION)
&& ncopies > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in double reduction or condition "
"reduction.\n");
return false;
}
/* For SLP reductions, see if there is a neutral value we can use. */
tree neutral_op = NULL_TREE;
if (slp_node)
neutral_op
= neutral_op_for_slp_reduction (slp_node_instance->reduc_phis, code,
GROUP_FIRST_ELEMENT (stmt_info) != NULL);
if (double_reduc && reduction_type == FOLD_LEFT_REDUCTION)
{
/* We can't support in-order reductions of code such as this:
for (int i = 0; i < n1; ++i)
for (int j = 0; j < n2; ++j)
l += a[j];
since GCC effectively transforms the loop when vectorizing:
for (int i = 0; i < n1 / VF; ++i)
for (int j = 0; j < n2; ++j)
for (int k = 0; k < VF; ++k)
l += a[j];
which is a reassociation of the original operation. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"in-order double reduction not supported.\n");
return false;
}
if (reduction_type == FOLD_LEFT_REDUCTION
&& slp_node
&& !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt)))
{
/* We cannot use in-order reductions in this case because there is
an implicit reassociation of the operations involved. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"in-order unchained SLP reductions not supported.\n");
return false;
}
/* For double reductions, and for SLP reductions with a neutral value,
we construct a variable-length initial vector by loading a vector
full of the neutral value and then shift-and-inserting the start
values into the low-numbered elements. */
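/* For example (illustrative): for an SLP group of two add reductions
   with start values s0 and s1 we begin from {0, 0, 0, ...} and
   shift-and-insert s1 and then s0, yielding {s0, s1, 0, ...}.  */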
if ((double_reduc || neutral_op)
&& !nunits_out.is_constant ()
&& !direct_internal_fn_supported_p (IFN_VEC_SHL_INSERT,
vectype_out, OPTIMIZE_FOR_SPEED))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"reduction on variable-length vectors requires"
" target support for a vector-shift-and-insert"
" operation.\n");
return false;
}
/* Check extra constraints for variable-length unchained SLP reductions. */
if (STMT_SLP_TYPE (stmt_info)
&& !GROUP_FIRST_ELEMENT (vinfo_for_stmt (stmt))
&& !nunits_out.is_constant ())
{
/* We checked above that we could build the initial vector when
there's a neutral element value. Check here for the case in
which each SLP statement has its own initial value and in which
that value needs to be repeated for every instance of the
statement within the initial vector. */
unsigned int group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
scalar_mode elt_mode = SCALAR_TYPE_MODE (TREE_TYPE (vectype_out));
if (!neutral_op
&& !can_duplicate_and_interleave_p (group_size, elt_mode))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported form of SLP reduction for"
" variable-length vectors: cannot build"
" initial vector.\n");
return false;
}
/* The epilogue code relies on the number of elements being a multiple
of the group size. The duplicate-and-interleave approach to setting
up the initial vector does too. */
if (!multiple_p (nunits_out, group_size))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"unsupported form of SLP reduction for"
" variable-length vectors: the vector size"
" is not a multiple of the number of results.\n");
return false;
}
}
/* In case of widening multiplication by a constant, we update the type
of the constant to be the type of the other operand. We check that the
constant fits the type in the pattern recognition pass. */
if (code == DOT_PROD_EXPR
&& !types_compatible_p (TREE_TYPE (ops[0]), TREE_TYPE (ops[1])))
{
if (TREE_CODE (ops[0]) == INTEGER_CST)
ops[0] = fold_convert (TREE_TYPE (ops[1]), ops[0]);
else if (TREE_CODE (ops[1]) == INTEGER_CST)
ops[1] = fold_convert (TREE_TYPE (ops[0]), ops[1]);
else
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"invalid types in dot-prod\n");
return false;
}
}
if (reduction_type == COND_REDUCTION)
{
widest_int ni;
if (! max_loop_iterations (loop, &ni))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"loop count not known, cannot create cond "
"reduction.\n");
return false;
}
/* Convert backedges to iterations. */
ni += 1;
/* The additional index will be the same type as the condition. Check
that the loop iteration count fits into this type less one (the zero
slot is reserved for iterations in which there are no matches). */
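/* For example (illustrative): with an 8-bit unsigned index type at most
   254 iterations can be handled, since index 255 is not representable
   once slot 0 is reserved for "no match".  */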
tree max_index = TYPE_MAX_VALUE (cr_index_scalar_type);
if (wi::geu_p (ni, wi::to_widest (max_index)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"loop size is greater than data size.\n");
return false;
}
}
/* In case the vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
more than one vector stmt, i.e. we need to "unroll" the
vector stmt by a factor VF/nunits. For more details see the
documentation in vectorizable_operation. */
/* If the reduction is used in an outer loop we need to generate
VF intermediate results, like so (e.g. for ncopies=2):
r0 = phi (init, r0)
r1 = phi (init, r1)
r0 = x0 + r0;
r1 = x1 + r1;
(i.e. we generate VF results in 2 registers).
In this case we have a separate def-use cycle for each copy, and therefore
for each copy we get the vector def for the reduction variable from the
respective phi node created for this copy.
Otherwise (the reduction is unused in the loop nest), we can combine
together intermediate results, like so (e.g. for ncopies=2):
r = phi (init, r)
r = x0 + r;
r = x1 + r;
(i.e. we generate VF/2 results in a single register).
In this case for each copy we get the vector def for the reduction variable
from the vectorized reduction operation generated in the previous iteration.
This only works when we see both the reduction PHI and its only consumer
in vectorizable_reduction and there are no intermediate stmts
participating. */
use_operand_p use_p;
gimple *use_stmt;
if (ncopies > 1
&& (STMT_VINFO_RELEVANT (stmt_info) <= vect_used_only_live)
&& single_imm_use (gimple_phi_result (reduc_def_stmt), &use_p, &use_stmt)
&& (use_stmt == stmt
|| STMT_VINFO_RELATED_STMT (vinfo_for_stmt (use_stmt)) == stmt))
{
single_defuse_cycle = true;
epilog_copies = 1;
}
else
epilog_copies = ncopies;
/* If the reduction stmt is one of the patterns that has a lane-reducing
operation embedded, we cannot handle the case of !single_defuse_cycle. */
if ((ncopies > 1
&& ! single_defuse_cycle)
&& (code == DOT_PROD_EXPR
|| code == WIDEN_SUM_EXPR
|| code == SAD_EXPR))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multi def-use cycle not possible for lane-reducing "
"reduction operation\n");
return false;
}
if (slp_node)
vec_num = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
else
vec_num = 1;
internal_fn cond_fn = get_conditional_internal_fn (code);
vec_loop_masks *masks = &LOOP_VINFO_MASKS (loop_vinfo);
if (!vec_stmt) /* transformation not required. */
{
if (first_p)
vect_model_reduction_cost (stmt_info, reduc_fn, ncopies);
if (loop_vinfo && LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
{
if (reduction_type != FOLD_LEFT_REDUCTION
&& (cond_fn == IFN_LAST
|| !direct_internal_fn_supported_p (cond_fn, vectype_in,
OPTIMIZE_FOR_SPEED)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because no"
" conditional operation is available.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
}
else if (reduc_index == -1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop for chained"
" reductions.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
}
else
vect_record_loop_mask (loop_vinfo, masks, ncopies * vec_num,
vectype_in);
}
if (dump_enabled_p ()
&& reduction_type == FOLD_LEFT_REDUCTION)
dump_printf_loc (MSG_NOTE, vect_location,
"using an in-order (fold-left) reduction.\n");
STMT_VINFO_TYPE (stmt_info) = reduc_vec_info_type;
return true;
}
/* Transform. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform reduction.\n");
/* FORNOW: Multiple types are not supported for condition. */
if (code == COND_EXPR)
gcc_assert (ncopies == 1);
bool masked_loop_p = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
if (reduction_type == FOLD_LEFT_REDUCTION)
return vectorize_fold_left_reduction
(stmt, gsi, vec_stmt, slp_node, reduc_def_stmt, code,
reduc_fn, ops, vectype_in, reduc_index, masks);
if (reduction_type == EXTRACT_LAST_REDUCTION)
{
gcc_assert (!slp_node);
return vectorizable_condition (stmt, gsi, vec_stmt,
NULL, reduc_index, NULL);
}
/* Create the destination vector */
vec_dest = vect_create_destination_var (scalar_dest, vectype_out);
prev_stmt_info = NULL;
prev_phi_info = NULL;
if (!slp_node)
{
vec_oprnds0.create (1);
vec_oprnds1.create (1);
if (op_type == ternary_op)
vec_oprnds2.create (1);
}
phis.create (vec_num);
vect_defs.create (vec_num);
if (!slp_node)
vect_defs.quick_push (NULL_TREE);
if (slp_node)
phis.splice (SLP_TREE_VEC_STMTS (slp_node_instance->reduc_phis));
else
phis.quick_push (STMT_VINFO_VEC_STMT (vinfo_for_stmt (reduc_def_stmt)));
for (j = 0; j < ncopies; j++)
{
if (code == COND_EXPR)
{
gcc_assert (!slp_node);
vectorizable_condition (stmt, gsi, vec_stmt,
PHI_RESULT (phis[0]),
reduc_index, NULL);
/* Multiple types are not supported for condition. */
break;
}
/* Handle uses. */
if (j == 0)
{
if (slp_node)
{
/* Get vec defs for all the operands except the reduction index,
ensuring the ordering of the ops in the vector is kept. */
auto_vec<tree, 3> slp_ops;
auto_vec<vec<tree>, 3> vec_defs;
slp_ops.quick_push (ops[0]);
slp_ops.quick_push (ops[1]);
if (op_type == ternary_op)
slp_ops.quick_push (ops[2]);
vect_get_slp_defs (slp_ops, slp_node, &vec_defs);
vec_oprnds0.safe_splice (vec_defs[0]);
vec_defs[0].release ();
vec_oprnds1.safe_splice (vec_defs[1]);
vec_defs[1].release ();
if (op_type == ternary_op)
{
vec_oprnds2.safe_splice (vec_defs[2]);
vec_defs[2].release ();
}
}
else
{
vec_oprnds0.quick_push
(vect_get_vec_def_for_operand (ops[0], stmt));
vec_oprnds1.quick_push
(vect_get_vec_def_for_operand (ops[1], stmt));
if (op_type == ternary_op)
vec_oprnds2.quick_push
(vect_get_vec_def_for_operand (ops[2], stmt));
}
}
else
{
if (!slp_node)
{
gcc_assert (reduc_index != -1 || ! single_defuse_cycle);
if (single_defuse_cycle && reduc_index == 0)
vec_oprnds0[0] = gimple_get_lhs (new_stmt);
else
vec_oprnds0[0]
= vect_get_vec_def_for_stmt_copy (dts[0], vec_oprnds0[0]);
if (single_defuse_cycle && reduc_index == 1)
vec_oprnds1[0] = gimple_get_lhs (new_stmt);
else
vec_oprnds1[0]
= vect_get_vec_def_for_stmt_copy (dts[1], vec_oprnds1[0]);
if (op_type == ternary_op)
{
if (single_defuse_cycle && reduc_index == 2)
vec_oprnds2[0] = gimple_get_lhs (new_stmt);
else
vec_oprnds2[0]
= vect_get_vec_def_for_stmt_copy (dts[2], vec_oprnds2[0]);
}
}
}
FOR_EACH_VEC_ELT (vec_oprnds0, i, def0)
{
tree vop[3] = { def0, vec_oprnds1[i], NULL_TREE };
if (masked_loop_p)
{
/* Make sure that the reduction accumulator is vop[0]. */
if (reduc_index == 1)
{
gcc_assert (commutative_tree_code (code));
std::swap (vop[0], vop[1]);
}
tree mask = vect_get_loop_mask (gsi, masks, vec_num * ncopies,
vectype_in, i * ncopies + j);
gcall *call = gimple_build_call_internal (cond_fn, 3, mask,
vop[0], vop[1]);
new_temp = make_ssa_name (vec_dest, call);
gimple_call_set_lhs (call, new_temp);
gimple_call_set_nothrow (call, true);
new_stmt = call;
}
else
{
if (op_type == ternary_op)
vop[2] = vec_oprnds2[i];
new_temp = make_ssa_name (vec_dest, new_stmt);
new_stmt = gimple_build_assign (new_temp, code,
vop[0], vop[1], vop[2]);
}
vect_finish_stmt_generation (stmt, new_stmt, gsi);
if (slp_node)
{
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
vect_defs.quick_push (new_temp);
}
else
vect_defs[0] = new_temp;
}
if (slp_node)
continue;
if (j == 0)
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = new_stmt;
else
STMT_VINFO_RELATED_STMT (prev_stmt_info) = new_stmt;
prev_stmt_info = vinfo_for_stmt (new_stmt);
}
/* Finalize the reduction-phi (set its arguments) and create the
epilog reduction code. */
if ((!single_defuse_cycle || code == COND_EXPR) && !slp_node)
vect_defs[0] = gimple_get_lhs (*vec_stmt);
vect_create_epilog_for_reduction (vect_defs, stmt, reduc_def_stmt,
epilog_copies, reduc_fn, phis,
double_reduc, slp_node, slp_node_instance,
cond_reduc_val, cond_reduc_op_code,
neutral_op);
return true;
}
/* Function vect_min_worthwhile_factor.
For a loop where we could vectorize the operation indicated by CODE,
return the minimum vectorization factor that makes it worthwhile
to use generic vectors. */
static unsigned int
vect_min_worthwhile_factor (enum tree_code code)
{
switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
case NEGATE_EXPR:
return 4;
case BIT_AND_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case BIT_NOT_EXPR:
return 2;
default:
return INT_MAX;
}
}
/* Return true if VINFO indicates we are doing loop vectorization and if
it is worth decomposing CODE operations into scalar operations for
that loop's vectorization factor. */
bool
vect_worthwhile_without_simd_p (vec_info *vinfo, tree_code code)
{
loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
unsigned HOST_WIDE_INT value;
return (loop_vinfo
&& LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&value)
&& value >= vect_min_worthwhile_factor (code));
}
/* Function vectorizable_induction
Check if PHI performs an induction computation that can be vectorized.
If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
phi to replace it, put it in VEC_STMT, and add it to the same basic block.
Return FALSE if not a vectorizable STMT, TRUE otherwise. */
bool
vectorizable_induction (gimple *phi,
gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
gimple **vec_stmt, slp_tree slp_node)
{
stmt_vec_info stmt_info = vinfo_for_stmt (phi);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
unsigned ncopies;
bool nested_in_vect_loop = false;
struct loop *iv_loop;
tree vec_def;
edge pe = loop_preheader_edge (loop);
basic_block new_bb;
tree new_vec, vec_init, vec_step, t;
tree new_name;
gimple *new_stmt;
gphi *induction_phi;
tree induc_def, vec_dest;
tree init_expr, step_expr;
poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
unsigned i;
tree expr;
gimple_seq stmts;
imm_use_iterator imm_iter;
use_operand_p use_p;
gimple *exit_phi;
edge latch_e;
tree loop_arg;
gimple_stmt_iterator si;
basic_block bb = gimple_bb (phi);
if (gimple_code (phi) != GIMPLE_PHI)
return false;
if (!STMT_VINFO_RELEVANT_P (stmt_info))
return false;
/* Make sure it was recognized as induction computation. */
if (STMT_VINFO_DEF_TYPE (stmt_info) != vect_induction_def)
return false;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
if (slp_node)
ncopies = 1;
else
ncopies = vect_get_num_copies (loop_vinfo, vectype);
gcc_assert (ncopies >= 1);
/* FORNOW. These restrictions should be relaxed. */
if (nested_in_vect_loop_p (loop, phi))
{
imm_use_iterator imm_iter;
use_operand_p use_p;
gimple *exit_phi;
edge latch_e;
tree loop_arg;
if (ncopies > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"multiple types in nested loop.\n");
return false;
}
/* FORNOW: outer loop induction with SLP not supported. */
if (STMT_SLP_TYPE (stmt_info))
return false;
exit_phi = NULL;
latch_e = loop_latch_edge (loop->inner);
loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
{
gimple *use_stmt = USE_STMT (use_p);
if (is_gimple_debug (use_stmt))
continue;
if (!flow_bb_inside_loop_p (loop->inner, gimple_bb (use_stmt)))
{
exit_phi = use_stmt;
break;
}
}
if (exit_phi)
{
stmt_vec_info exit_phi_vinfo = vinfo_for_stmt (exit_phi);
if (!(STMT_VINFO_RELEVANT_P (exit_phi_vinfo)
&& !STMT_VINFO_LIVE_P (exit_phi_vinfo)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"inner-loop induction only used outside "
"of the outer vectorized loop.\n");
return false;
}
}
nested_in_vect_loop = true;
iv_loop = loop->inner;
}
else
iv_loop = loop;
gcc_assert (iv_loop == (gimple_bb (phi))->loop_father);
if (slp_node && !nunits.is_constant ())
{
/* The current SLP code creates the initial value element-by-element. */
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"SLP induction not supported for variable-length"
" vectors.\n");
return false;
}
if (!vec_stmt) /* transformation not required. */
{
STMT_VINFO_TYPE (stmt_info) = induc_vec_info_type;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== vectorizable_induction ===\n");
vect_model_induction_cost (stmt_info, ncopies);
return true;
}
/* Transform. */
/* Compute a vector variable, initialized with the first VF values of
the induction variable. E.g., for an iv with IV_PHI='X' and
evolution S, for a vector of 4 units, we want to compute:
[X, X + S, X + 2*S, X + 3*S]. */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform induction phi.\n");
latch_e = loop_latch_edge (iv_loop);
loop_arg = PHI_ARG_DEF_FROM_EDGE (phi, latch_e);
step_expr = STMT_VINFO_LOOP_PHI_EVOLUTION_PART (stmt_info);
gcc_assert (step_expr != NULL_TREE);
pe = loop_preheader_edge (iv_loop);
init_expr = PHI_ARG_DEF_FROM_EDGE (phi,
loop_preheader_edge (iv_loop));
stmts = NULL;
if (!nested_in_vect_loop)
{
/* Convert the initial value to the desired type. */
tree new_type = TREE_TYPE (vectype);
init_expr = gimple_convert (&stmts, new_type, init_expr);
/* If we are using the loop mask to "peel" for alignment then we need
to adjust the start value here. */
tree skip_niters = LOOP_VINFO_MASK_SKIP_NITERS (loop_vinfo);
if (skip_niters != NULL_TREE)
{
if (FLOAT_TYPE_P (vectype))
skip_niters = gimple_build (&stmts, FLOAT_EXPR, new_type,
skip_niters);
else
skip_niters = gimple_convert (&stmts, new_type, skip_niters);
tree skip_step = gimple_build (&stmts, MULT_EXPR, new_type,
skip_niters, step_expr);
init_expr = gimple_build (&stmts, MINUS_EXPR, new_type,
init_expr, skip_step);
}
}
/* Convert the step to the desired type. */
step_expr = gimple_convert (&stmts, TREE_TYPE (vectype), step_expr);
if (stmts)
{
new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
gcc_assert (!new_bb);
}
/* Find the first insertion point in the BB. */
si = gsi_after_labels (bb);
/* For SLP induction we have to generate several IVs as for example
with group size 3 we need [i, i, i, i + S] [i + S, i + S, i + 2*S, i + 2*S]
[i + 2*S, i + 3*S, i + 3*S, i + 3*S]. The step is the same uniform
[VF*S, VF*S, VF*S, VF*S] for all. */
if (slp_node)
{
/* Enforced above. */
unsigned int const_nunits = nunits.to_constant ();
/* Generate [VF*S, VF*S, ... ]. */
if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
{
expr = build_int_cst (integer_type_node, vf);
expr = fold_convert (TREE_TYPE (step_expr), expr);
}
else
expr = build_int_cst (TREE_TYPE (step_expr), vf);
new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
expr, step_expr);
if (! CONSTANT_CLASS_P (new_name))
new_name = vect_init_vector (phi, new_name,
TREE_TYPE (step_expr), NULL);
new_vec = build_vector_from_val (vectype, new_name);
vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
/* Now generate the IVs. */
unsigned group_size = SLP_TREE_SCALAR_STMTS (slp_node).length ();
unsigned nvects = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
unsigned elts = const_nunits * nvects;
unsigned nivs = least_common_multiple (group_size,
const_nunits) / const_nunits;
gcc_assert (elts % group_size == 0);
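/* E.g. (illustrative) for the group-size-3 example above with
   const_nunits == 4: nivs == least_common_multiple (3, 4) / 4 == 3,
   and with nvects == 3 we get elts == 12, a multiple of the group
   size.  */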
tree elt = init_expr;
unsigned ivn;
for (ivn = 0; ivn < nivs; ++ivn)
{
tree_vector_builder elts (vectype, const_nunits, 1);
stmts = NULL;
for (unsigned eltn = 0; eltn < const_nunits; ++eltn)
{
if (ivn*const_nunits + eltn >= group_size
&& (ivn * const_nunits + eltn) % group_size == 0)
elt = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (elt),
elt, step_expr);
elts.quick_push (elt);
}
vec_init = gimple_build_vector (&stmts, &elts);
if (stmts)
{
new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
gcc_assert (!new_bb);
}
/* Create the induction-phi that defines the induction-operand. */
vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
induction_phi = create_phi_node (vec_dest, iv_loop->header);
set_vinfo_for_stmt (induction_phi,
new_stmt_vec_info (induction_phi, loop_vinfo));
induc_def = PHI_RESULT (induction_phi);
/* Create the iv update inside the loop */
vec_def = make_ssa_name (vec_dest);
new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));
/* Set the arguments of the phi node: */
add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
UNKNOWN_LOCATION);
SLP_TREE_VEC_STMTS (slp_node).quick_push (induction_phi);
}
/* Re-use IVs when we can. */
if (ivn < nvects)
{
unsigned vfp
= least_common_multiple (group_size, const_nunits) / group_size;
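/* E.g. (illustrative) in the group-size-3, const_nunits == 4 case:
   vfp == least_common_multiple (3, 4) / 3 == 4, so each reused IV is a
   previously generated IV advanced by four steps.  */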
/* Generate [VF'*S, VF'*S, ... ]. */
if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
{
expr = build_int_cst (integer_type_node, vfp);
expr = fold_convert (TREE_TYPE (step_expr), expr);
}
else
expr = build_int_cst (TREE_TYPE (step_expr), vfp);
new_name = fold_build2 (MULT_EXPR, TREE_TYPE (step_expr),
expr, step_expr);
if (! CONSTANT_CLASS_P (new_name))
new_name = vect_init_vector (phi, new_name,
TREE_TYPE (step_expr), NULL);
new_vec = build_vector_from_val (vectype, new_name);
vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
for (; ivn < nvects; ++ivn)
{
gimple *iv = SLP_TREE_VEC_STMTS (slp_node)[ivn - nivs];
tree def;
if (gimple_code (iv) == GIMPLE_PHI)
def = gimple_phi_result (iv);
else
def = gimple_assign_lhs (iv);
new_stmt = gimple_build_assign (make_ssa_name (vectype),
PLUS_EXPR,
def, vec_step);
if (gimple_code (iv) == GIMPLE_PHI)
gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
else
{
gimple_stmt_iterator tgsi = gsi_for_stmt (iv);
gsi_insert_after (&tgsi, new_stmt, GSI_CONTINUE_LINKING);
}
set_vinfo_for_stmt (new_stmt,
new_stmt_vec_info (new_stmt, loop_vinfo));
SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt);
}
}
return true;
}
/* Create the vector that holds the initial_value of the induction. */
if (nested_in_vect_loop)
{
/* iv_loop is nested in the loop to be vectorized. init_expr has already
been created during vectorization of previous stmts. We obtain it
from the STMT_VINFO_VEC_STMT of the defining stmt. */
vec_init = vect_get_vec_def_for_operand (init_expr, phi);
/* If the initial value is not of proper type, convert it. */
if (!useless_type_conversion_p (vectype, TREE_TYPE (vec_init)))
{
new_stmt
= gimple_build_assign (vect_get_new_ssa_name (vectype,
vect_simple_var,
"vec_iv_"),
VIEW_CONVERT_EXPR,
build1 (VIEW_CONVERT_EXPR, vectype,
vec_init));
vec_init = gimple_assign_lhs (new_stmt);
new_bb = gsi_insert_on_edge_immediate (loop_preheader_edge (iv_loop),
new_stmt);
gcc_assert (!new_bb);
set_vinfo_for_stmt (new_stmt,
new_stmt_vec_info (new_stmt, loop_vinfo));
}
}
else
{
/* iv_loop is the loop to be vectorized. Create:
vec_init = [X, X+S, X+2*S, X+3*S] (S = step_expr, X = init_expr) */
stmts = NULL;
new_name = gimple_convert (&stmts, TREE_TYPE (vectype), init_expr);
unsigned HOST_WIDE_INT const_nunits;
if (nunits.is_constant (&const_nunits))
{
tree_vector_builder elts (vectype, const_nunits, 1);
elts.quick_push (new_name);
for (i = 1; i < const_nunits; i++)
{
/* Create: new_name_i = new_name + step_expr */
new_name = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (new_name),
new_name, step_expr);
elts.quick_push (new_name);
}
/* Create a vector from [new_name_0, new_name_1, ...,
new_name_nunits-1] */
vec_init = gimple_build_vector (&stmts, &elts);
}
else if (INTEGRAL_TYPE_P (TREE_TYPE (step_expr)))
/* Build the initial value directly from a VEC_SERIES_EXPR. */
vec_init = gimple_build (&stmts, VEC_SERIES_EXPR, vectype,
new_name, step_expr);
else
{
/* Build:
[base, base, base, ...]
+ (vectype) [0, 1, 2, ...] * [step, step, step, ...]. */
gcc_assert (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)));
gcc_assert (flag_associative_math);
tree index = build_index_vector (vectype, 0, 1);
tree base_vec = gimple_build_vector_from_val (&stmts, vectype,
new_name);
tree step_vec = gimple_build_vector_from_val (&stmts, vectype,
step_expr);
vec_init = gimple_build (&stmts, FLOAT_EXPR, vectype, index);
vec_init = gimple_build (&stmts, MULT_EXPR, vectype,
vec_init, step_vec);
vec_init = gimple_build (&stmts, PLUS_EXPR, vectype,
vec_init, base_vec);
}
if (stmts)
{
new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
gcc_assert (!new_bb);
}
}
/* Create the vector that holds the step of the induction. */
if (nested_in_vect_loop)
/* iv_loop is nested in the loop to be vectorized. Generate:
vec_step = [S, S, S, S] */
new_name = step_expr;
else
{
/* iv_loop is the loop to be vectorized. Generate:
vec_step = [VF*S, VF*S, VF*S, VF*S] */
gimple_seq seq = NULL;
if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
{
expr = build_int_cst (integer_type_node, vf);
expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
}
else
expr = build_int_cst (TREE_TYPE (step_expr), vf);
new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
expr, step_expr);
if (seq)
{
new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
gcc_assert (!new_bb);
}
}
t = unshare_expr (new_name);
gcc_assert (CONSTANT_CLASS_P (new_name)
|| TREE_CODE (new_name) == SSA_NAME);
new_vec = build_vector_from_val (vectype, t);
vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
/* Create the following def-use cycle:
loop prolog:
vec_init = ...
vec_step = ...
loop:
vec_iv = PHI <vec_init, vec_loop>
...
STMT
...
vec_loop = vec_iv + vec_step; */
/* Create the induction-phi that defines the induction-operand. */
vec_dest = vect_get_new_vect_var (vectype, vect_simple_var, "vec_iv_");
induction_phi = create_phi_node (vec_dest, iv_loop->header);
set_vinfo_for_stmt (induction_phi,
new_stmt_vec_info (induction_phi, loop_vinfo));
induc_def = PHI_RESULT (induction_phi);
/* Create the iv update inside the loop */
vec_def = make_ssa_name (vec_dest);
new_stmt = gimple_build_assign (vec_def, PLUS_EXPR, induc_def, vec_step);
gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
set_vinfo_for_stmt (new_stmt, new_stmt_vec_info (new_stmt, loop_vinfo));
/* Set the arguments of the phi node: */
add_phi_arg (induction_phi, vec_init, pe, UNKNOWN_LOCATION);
add_phi_arg (induction_phi, vec_def, loop_latch_edge (iv_loop),
UNKNOWN_LOCATION);
STMT_VINFO_VEC_STMT (stmt_info) = *vec_stmt = induction_phi;
/* In case that vectorization factor (VF) is bigger than the number
of elements that we can fit in a vectype (nunits), we have to generate
more than one vector stmt - i.e - we need to "unroll" the
vector stmt by a factor VF/nunits. For more details see documentation
in vectorizable_operation. */
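/* Illustrative example (editorial note): with VF = 8 and nunits = 4,
ncopies = 2; the second copy is the first plus the vector
[nunits*S, ...] = [4*S, 4*S, 4*S, 4*S] built below. */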
if (ncopies > 1)
{
gimple_seq seq = NULL;
stmt_vec_info prev_stmt_vinfo;
/* FORNOW. This restriction should be relaxed. */
gcc_assert (!nested_in_vect_loop);
/* Create the vector that holds the step of the induction. */
if (SCALAR_FLOAT_TYPE_P (TREE_TYPE (step_expr)))
{
expr = build_int_cst (integer_type_node, nunits);
expr = gimple_build (&seq, FLOAT_EXPR, TREE_TYPE (step_expr), expr);
}
else
expr = build_int_cst (TREE_TYPE (step_expr), nunits);
new_name = gimple_build (&seq, MULT_EXPR, TREE_TYPE (step_expr),
expr, step_expr);
if (seq)
{
new_bb = gsi_insert_seq_on_edge_immediate (pe, seq);
gcc_assert (!new_bb);
}
t = unshare_expr (new_name);
gcc_assert (CONSTANT_CLASS_P (new_name)
|| TREE_CODE (new_name) == SSA_NAME);
new_vec = build_vector_from_val (vectype, t);
vec_step = vect_init_vector (phi, new_vec, vectype, NULL);
vec_def = induc_def;
prev_stmt_vinfo = vinfo_for_stmt (induction_phi);
for (i = 1; i < ncopies; i++)
{
/* vec_i = vec_prev + vec_step */
new_stmt = gimple_build_assign (vec_dest, PLUS_EXPR,
vec_def, vec_step);
vec_def = make_ssa_name (vec_dest, new_stmt);
gimple_assign_set_lhs (new_stmt, vec_def);
gsi_insert_before (&si, new_stmt, GSI_SAME_STMT);
set_vinfo_for_stmt (new_stmt,
new_stmt_vec_info (new_stmt, loop_vinfo));
STMT_VINFO_RELATED_STMT (prev_stmt_vinfo) = new_stmt;
prev_stmt_vinfo = vinfo_for_stmt (new_stmt);
}
}
if (nested_in_vect_loop)
{
/* Find the loop-closed exit-phi of the induction, and record
the final vector of induction results: */
exit_phi = NULL;
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, loop_arg)
{
gimple *use_stmt = USE_STMT (use_p);
if (is_gimple_debug (use_stmt))
continue;
if (!flow_bb_inside_loop_p (iv_loop, gimple_bb (use_stmt)))
{
exit_phi = use_stmt;
break;
}
}
if (exit_phi)
{
stmt_vec_info stmt_vinfo = vinfo_for_stmt (exit_phi);
/* FORNOW. We do not yet support the case where an inner-loop induction
is used only outside the outer-loop (i.e. not in the outer-loop itself). */
gcc_assert (STMT_VINFO_RELEVANT_P (stmt_vinfo)
&& !STMT_VINFO_LIVE_P (stmt_vinfo));
STMT_VINFO_VEC_STMT (stmt_vinfo) = new_stmt;
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"vector of inductions after inner-loop:");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, new_stmt, 0);
}
}
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"transform induction: created def-use cycle: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, induction_phi, 0);
dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
SSA_NAME_DEF_STMT (vec_def), 0);
}
return true;
}
/* Function vectorizable_live_operation.
STMT computes a value that is used outside the loop. Check if
it can be supported. */
bool
vectorizable_live_operation (gimple *stmt,
gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED,
slp_tree slp_node, int slp_index,
gimple **vec_stmt)
{
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
imm_use_iterator imm_iter;
tree lhs, lhs_type, bitsize, vec_bitsize;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
int ncopies;
gimple *use_stmt;
auto_vec<tree> vec_oprnds;
int vec_entry = 0;
poly_uint64 vec_index = 0;
gcc_assert (STMT_VINFO_LIVE_P (stmt_info));
if (STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def)
return false;
/* FORNOW. CHECKME. */
if (nested_in_vect_loop_p (loop, stmt))
return false;
/* If STMT is not relevant and it is a simple assignment and its inputs are
invariant then it can remain in place, unvectorized. The original last
scalar value that it computes will be used. */
if (!STMT_VINFO_RELEVANT_P (stmt_info))
{
gcc_assert (is_simple_and_all_uses_invariant (stmt, loop_vinfo));
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"statement is simple and uses invariant. Leaving in "
"place.\n");
return true;
}
if (slp_node)
ncopies = 1;
else
ncopies = vect_get_num_copies (loop_vinfo, vectype);
if (slp_node)
{
gcc_assert (slp_index >= 0);
int num_scalar = SLP_TREE_SCALAR_STMTS (slp_node).length ();
int num_vec = SLP_TREE_NUMBER_OF_VEC_STMTS (slp_node);
/* Get the last occurrence of the scalar index from the concatenation of
all the slp vectors. Calculate which slp vector it is and the index
within. */
poly_uint64 pos = (num_vec * nunits) - num_scalar + slp_index;
/* Calculate which vector contains the result, and which lane of
that vector we need. */
if (!can_div_trunc_p (pos, nunits, &vec_entry, &vec_index))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"Cannot determine which vector holds the"
" final result.\n");
return false;
}
}
if (!vec_stmt)
{
/* No transformation required. */
if (LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo))
{
if (!direct_internal_fn_supported_p (IFN_EXTRACT_LAST, vectype,
OPTIMIZE_FOR_SPEED))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because "
"the target doesn't support extract last "
"reduction.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
}
else if (slp_node)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because an "
"SLP statement is live after the loop.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
}
else if (ncopies > 1)
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"can't use a fully-masked loop because"
" ncopies is greater than 1.\n");
LOOP_VINFO_CAN_FULLY_MASK_P (loop_vinfo) = false;
}
else
{
gcc_assert (ncopies == 1 && !slp_node);
vect_record_loop_mask (loop_vinfo,
&LOOP_VINFO_MASKS (loop_vinfo),
1, vectype);
}
}
return true;
}
/* If stmt has a related stmt, then use that for getting the lhs. */
if (is_pattern_stmt_p (stmt_info))
stmt = STMT_VINFO_RELATED_STMT (stmt_info);
lhs = (is_a <gphi *> (stmt)) ? gimple_phi_result (stmt)
: gimple_get_lhs (stmt);
lhs_type = TREE_TYPE (lhs);
bitsize = (VECTOR_BOOLEAN_TYPE_P (vectype)
? bitsize_int (TYPE_PRECISION (TREE_TYPE (vectype)))
: TYPE_SIZE (TREE_TYPE (vectype)));
vec_bitsize = TYPE_SIZE (vectype);
/* Get the vectorized lhs of STMT and the lane to use (counted in bits). */
tree vec_lhs, bitstart;
if (slp_node)
{
gcc_assert (!LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
/* Get the correct slp vectorized stmt. */
gimple *vec_stmt = SLP_TREE_VEC_STMTS (slp_node)[vec_entry];
if (gphi *phi = dyn_cast <gphi *> (vec_stmt))
vec_lhs = gimple_phi_result (phi);
else
vec_lhs = gimple_get_lhs (vec_stmt);
/* Get entry to use. */
bitstart = bitsize_int (vec_index);
bitstart = int_const_binop (MULT_EXPR, bitsize, bitstart);
}
else
{
enum vect_def_type dt = STMT_VINFO_DEF_TYPE (stmt_info);
vec_lhs = vect_get_vec_def_for_operand_1 (stmt, dt);
gcc_checking_assert (ncopies == 1
|| !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo));
/* For multiple copies, get the last copy. */
for (int i = 1; i < ncopies; ++i)
vec_lhs = vect_get_vec_def_for_stmt_copy (vect_unknown_def_type,
vec_lhs);
/* Get the last lane in the vector. */
bitstart = int_const_binop (MINUS_EXPR, vec_bitsize, bitsize);
}
gimple_seq stmts = NULL;
tree new_tree;
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
{
/* Emit:
SCALAR_RES = EXTRACT_LAST <VEC_LHS, MASK>
where VEC_LHS is the vectorized live-out result and MASK is
the loop mask for the final iteration. */
gcc_assert (ncopies == 1 && !slp_node);
tree scalar_type = TREE_TYPE (STMT_VINFO_VECTYPE (stmt_info));
tree scalar_res = make_ssa_name (scalar_type);
tree mask = vect_get_loop_mask (gsi, &LOOP_VINFO_MASKS (loop_vinfo),
1, vectype, 0);
gcall *new_stmt = gimple_build_call_internal (IFN_EXTRACT_LAST,
2, mask, vec_lhs);
gimple_call_set_lhs (new_stmt, scalar_res);
gimple_seq_add_stmt (&stmts, new_stmt);
/* Convert the extracted vector element to the required scalar type. */
new_tree = gimple_convert (&stmts, lhs_type, scalar_res);
}
else
{
tree bftype = TREE_TYPE (vectype);
if (VECTOR_BOOLEAN_TYPE_P (vectype))
bftype = build_nonstandard_integer_type (tree_to_uhwi (bitsize), 1);
new_tree = build3 (BIT_FIELD_REF, bftype, vec_lhs, bitsize, bitstart);
new_tree = force_gimple_operand (fold_convert (lhs_type, new_tree),
&stmts, true, NULL_TREE);
}
if (stmts)
gsi_insert_seq_on_edge_immediate (single_exit (loop), stmts);
/* Replace the use of lhs with the newly computed result. If the use
stmt is a single-arg PHI, just replace all uses of the PHI result.
This is necessary because the lcssa PHI defining lhs may come before
the newly inserted stmt. */
use_operand_p use_p;
FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, lhs)
if (!flow_bb_inside_loop_p (loop, gimple_bb (use_stmt))
&& !is_gimple_debug (use_stmt))
{
if (gimple_code (use_stmt) == GIMPLE_PHI
&& gimple_phi_num_args (use_stmt) == 1)
{
replace_uses_by (gimple_phi_result (use_stmt), new_tree);
}
else
{
FOR_EACH_IMM_USE_ON_STMT (use_p, imm_iter)
SET_USE (use_p, new_tree);
}
update_stmt (use_stmt);
}
return true;
}
/* Kill any debug uses outside LOOP of SSA names defined in STMT. */
static void
vect_loop_kill_debug_uses (struct loop *loop, gimple *stmt)
{
ssa_op_iter op_iter;
imm_use_iterator imm_iter;
def_operand_p def_p;
gimple *ustmt;
FOR_EACH_PHI_OR_STMT_DEF (def_p, stmt, op_iter, SSA_OP_DEF)
{
FOR_EACH_IMM_USE_STMT (ustmt, imm_iter, DEF_FROM_PTR (def_p))
{
basic_block bb;
if (!is_gimple_debug (ustmt))
continue;
bb = gimple_bb (ustmt);
if (!flow_bb_inside_loop_p (loop, bb))
{
if (gimple_debug_bind_p (ustmt))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"killing debug use\n");
gimple_debug_bind_reset_value (ustmt);
update_stmt (ustmt);
}
else
gcc_unreachable ();
}
}
}
}
/* Given loop represented by LOOP_VINFO, return true if computation of
LOOP_VINFO_NITERS (= LOOP_VINFO_NITERSM1 + 1) doesn't overflow, false
otherwise. */
static bool
loop_niters_no_overflow (loop_vec_info loop_vinfo)
{
/* Constant case. */
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
tree cst_niters = LOOP_VINFO_NITERS (loop_vinfo);
tree cst_nitersm1 = LOOP_VINFO_NITERSM1 (loop_vinfo);
gcc_assert (TREE_CODE (cst_niters) == INTEGER_CST);
gcc_assert (TREE_CODE (cst_nitersm1) == INTEGER_CST);
if (wi::to_widest (cst_nitersm1) < wi::to_widest (cst_niters))
return true;
}
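/* Editorial example: if the niters type is a 32-bit unsigned type and
nitersm1 == 0xffffffff, then niters wraps to 0, the comparison above
fails, and we fall through to the max-iterations check below. */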
widest_int max;
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
/* Check the upper bound of loop niters. */
if (get_max_loop_iterations (loop, &max))
{
tree type = TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo));
signop sgn = TYPE_SIGN (type);
widest_int type_max = widest_int::from (wi::max_value (type), sgn);
if (max < type_max)
return true;
}
return false;
}
/* Return a mask type with half the number of elements as TYPE. */
tree
vect_halve_mask_nunits (tree type)
{
poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
return build_truth_vector_type (nunits, current_vector_size);
}
/* Return a mask type with twice as many elements as TYPE. */
tree
vect_double_mask_nunits (tree type)
{
poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
return build_truth_vector_type (nunits, current_vector_size);
}
/* Record that a fully-masked version of LOOP_VINFO would need MASKS to
contain a sequence of NVECTORS masks that each control a vector of type
VECTYPE. */
void
vect_record_loop_mask (loop_vec_info loop_vinfo, vec_loop_masks *masks,
unsigned int nvectors, tree vectype)
{
gcc_assert (nvectors != 0);
if (masks->length () < nvectors)
masks->safe_grow_cleared (nvectors);
rgroup_masks *rgm = &(*masks)[nvectors - 1];
/* The number of scalars per iteration and the number of vectors are
both compile-time constants. */
unsigned int nscalars_per_iter
= exact_div (nvectors * TYPE_VECTOR_SUBPARTS (vectype),
LOOP_VINFO_VECT_FACTOR (loop_vinfo)).to_constant ();
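/* Editorial example: with nvectors = 2, 8-element vectors and VF = 16,
nscalars_per_iter = (2 * 8) / 16 = 1; the rgroup keeps the mask type
of whichever caller needs the most scalars per iteration. */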
if (rgm->max_nscalars_per_iter < nscalars_per_iter)
{
rgm->max_nscalars_per_iter = nscalars_per_iter;
rgm->mask_type = build_same_sized_truth_vector_type (vectype);
}
}
/* Given a complete set of masks MASKS, extract mask number INDEX
for an rgroup that operates on NVECTORS vectors of type VECTYPE,
where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI.
See the comment above vec_loop_masks for more details about the mask
arrangement. */
tree
vect_get_loop_mask (gimple_stmt_iterator *gsi, vec_loop_masks *masks,
unsigned int nvectors, tree vectype, unsigned int index)
{
rgroup_masks *rgm = &(*masks)[nvectors - 1];
tree mask_type = rgm->mask_type;
/* Populate the rgroup's mask array, if this is the first time we've
used it. */
if (rgm->masks.is_empty ())
{
rgm->masks.safe_grow_cleared (nvectors);
for (unsigned int i = 0; i < nvectors; ++i)
{
tree mask = make_temp_ssa_name (mask_type, NULL, "loop_mask");
/* Provide a dummy definition until the real one is available. */
SSA_NAME_DEF_STMT (mask) = gimple_build_nop ();
rgm->masks[i] = mask;
}
}
tree mask = rgm->masks[index];
if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
TYPE_VECTOR_SUBPARTS (vectype)))
{
/* A loop mask for data type X can be reused for data type Y
if X has N times more elements than Y and if Y's elements
are N times bigger than X's. In this case each sequence
of N elements in the loop mask will be all-zero or all-one.
We can then view-convert the mask so that each sequence of
N elements is replaced by a single element. */
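/* Editorial example: a 16-element mask computed for 16 x int8 data can
be reused for 8 x int16 data; each pair of mask elements is all-zero
or all-one, so the VIEW_CONVERT_EXPR below collapses every pair into
one wider mask element. */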
gcc_assert (multiple_p (TYPE_VECTOR_SUBPARTS (mask_type),
TYPE_VECTOR_SUBPARTS (vectype)));
gimple_seq seq = NULL;
mask_type = build_same_sized_truth_vector_type (vectype);
mask = gimple_build (&seq, VIEW_CONVERT_EXPR, mask_type, mask);
if (seq)
gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
}
return mask;
}
/* Scale the profiling counters of LOOP, which is vectorized by factor
VF, according to the estimated iteration count. */
static void
scale_profile_for_vect_loop (struct loop *loop, unsigned vf)
{
edge preheader = loop_preheader_edge (loop);
/* Reduce loop iterations by the vectorization factor. */
gcov_type new_est_niter = niter_for_unrolled_loop (loop, vf);
profile_count freq_h = loop->header->count, freq_e = preheader->count ();
if (freq_h.nonzero_p ())
{
profile_probability p;
/* Avoid dropping loop body profile counter to 0 because of zero count
in loop's preheader. */
if (!(freq_e == profile_count::zero ()))
freq_e = freq_e.force_nonzero ();
p = freq_e.apply_scale (new_est_niter + 1, 1).probability_in (freq_h);
scale_loop_frequencies (loop, p);
}
edge exit_e = single_exit (loop);
exit_e->probability = profile_probability::always ()
.apply_scale (1, new_est_niter + 1);
edge exit_l = single_pred_edge (loop->latch);
profile_probability prob = exit_l->probability;
exit_l->probability = exit_e->probability.invert ();
if (prob.initialized_p () && exit_l->probability.initialized_p ())
scale_bbs_frequencies (&loop->latch, 1, exit_l->probability / prob);
}
/* Function vect_transform_loop.
The analysis phase has determined that the loop is vectorizable.
Vectorize the loop - create vectorized stmts to replace the scalar
stmts in the loop, and update the loop exit condition.
Returns scalar epilogue loop if any. */
struct loop *
vect_transform_loop (loop_vec_info loop_vinfo)
{
struct loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
struct loop *epilogue = NULL;
basic_block *bbs = LOOP_VINFO_BBS (loop_vinfo);
int nbbs = loop->num_nodes;
int i;
tree niters_vector = NULL_TREE;
tree step_vector = NULL_TREE;
tree niters_vector_mult_vf = NULL_TREE;
poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
unsigned int lowest_vf = constant_lower_bound (vf);
bool grouped_store;
bool slp_scheduled = false;
gimple *stmt, *pattern_stmt;
gimple_seq pattern_def_seq = NULL;
gimple_stmt_iterator pattern_def_si = gsi_none ();
bool transform_pattern_stmt = false;
bool check_profitability = false;
unsigned int th;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "=== vec_transform_loop ===\n");
/* Use the more conservative vectorization threshold. If the number
of iterations is constant, assume the cost check has been performed
by our caller. If the threshold makes all loops profitable that
run at least the (estimated) vectorization factor number of times,
checking is pointless, too. */
th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
if (th >= vect_vf_for_cost (loop_vinfo)
&& !LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Profitability threshold is %d loop iterations.\n",
th);
check_profitability = true;
}
/* Make sure there exists a single-predecessor exit bb. Do this before
versioning. */
edge e = single_exit (loop);
if (! single_pred_p (e->dest))
{
split_loop_exit_edge (e);
if (dump_enabled_p ())
dump_printf (MSG_NOTE, "split exit edge\n");
}
/* Version the loop first, if required, so the profitability check
comes first. */
if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
{
poly_uint64 versioning_threshold
= LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
if (check_profitability
&& ordered_p (poly_uint64 (th), versioning_threshold))
{
versioning_threshold = ordered_max (poly_uint64 (th),
versioning_threshold);
check_profitability = false;
}
vect_loop_versioning (loop_vinfo, th, check_profitability,
versioning_threshold);
check_profitability = false;
}
/* Make sure there exists a single-predecessor exit bb also on the
scalar loop copy. Do this after versioning but before peeling so the
CFG structure is fine for both the scalar and the if-converted loop,
and slpeel_duplicate_current_defs_from_edges sees matched
loop-closed PHI nodes on the exit. */
if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
{
e = single_exit (LOOP_VINFO_SCALAR_LOOP (loop_vinfo));
if (! single_pred_p (e->dest))
{
split_loop_exit_edge (e);
if (dump_enabled_p ())
dump_printf (MSG_NOTE, "split exit edge of scalar loop\n");
}
}
tree niters = vect_build_loop_niters (loop_vinfo);
LOOP_VINFO_NITERS_UNCHANGED (loop_vinfo) = niters;
tree nitersm1 = unshare_expr (LOOP_VINFO_NITERSM1 (loop_vinfo));
bool niters_no_overflow = loop_niters_no_overflow (loop_vinfo);
epilogue = vect_do_peeling (loop_vinfo, niters, nitersm1, &niters_vector,
&step_vector, &niters_vector_mult_vf, th,
check_profitability, niters_no_overflow);
if (niters_vector == NULL_TREE)
{
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& !LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
&& known_eq (lowest_vf, vf))
{
niters_vector
= build_int_cst (TREE_TYPE (LOOP_VINFO_NITERS (loop_vinfo)),
LOOP_VINFO_INT_NITERS (loop_vinfo) / lowest_vf);
step_vector = build_one_cst (TREE_TYPE (niters));
}
else
vect_gen_vector_loop_niters (loop_vinfo, niters, &niters_vector,
&step_vector, niters_no_overflow);
}
/* 1) Make sure the loop header has exactly two entries
2) Make sure we have a preheader basic block. */
gcc_assert (EDGE_COUNT (loop->header->preds) == 2);
split_edge (loop_preheader_edge (loop));
if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
&& vect_use_loop_mask_for_alignment_p (loop_vinfo))
/* This will deal with any possible peeling. */
vect_prepare_for_masked_peels (loop_vinfo);
/* FORNOW: the vectorizer supports only loops whose body consists
of one basic block (header + empty latch). When the vectorizer
supports more involved loop forms, the order in which the BBs are
traversed will need to be reconsidered. */
for (i = 0; i < nbbs; i++)
{
basic_block bb = bbs[i];
stmt_vec_info stmt_info;
for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);
gsi_next (&si))
{
gphi *phi = si.phi ();
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"------>vectorizing phi: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, phi, 0);
}
stmt_info = vinfo_for_stmt (phi);
if (!stmt_info)
continue;
if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
vect_loop_kill_debug_uses (loop, phi);
if (!STMT_VINFO_RELEVANT_P (stmt_info)
&& !STMT_VINFO_LIVE_P (stmt_info))
continue;
if (STMT_VINFO_VECTYPE (stmt_info)
&& (maybe_ne
(TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info)), vf))
&& dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
if ((STMT_VINFO_DEF_TYPE (stmt_info) == vect_induction_def
|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_reduction_def
|| STMT_VINFO_DEF_TYPE (stmt_info) == vect_nested_cycle)
&& ! PURE_SLP_STMT (stmt_info))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform phi.\n");
vect_transform_stmt (phi, NULL, NULL, NULL, NULL);
}
}
pattern_stmt = NULL;
for (gimple_stmt_iterator si = gsi_start_bb (bb);
!gsi_end_p (si) || transform_pattern_stmt;)
{
bool is_store;
if (transform_pattern_stmt)
stmt = pattern_stmt;
else
{
stmt = gsi_stmt (si);
/* During vectorization remove existing clobber stmts. */
if (gimple_clobber_p (stmt))
{
unlink_stmt_vdef (stmt);
gsi_remove (&si, true);
release_defs (stmt);
continue;
}
}
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"------>vectorizing statement: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt, 0);
}
stmt_info = vinfo_for_stmt (stmt);
/* vector stmts created in the outer-loop during vectorization of
stmts in an inner-loop may not have a stmt_info, and do not
need to be vectorized. */
if (!stmt_info)
{
gsi_next (&si);
continue;
}
if (MAY_HAVE_DEBUG_BIND_STMTS && !STMT_VINFO_LIVE_P (stmt_info))
vect_loop_kill_debug_uses (loop, stmt);
if (!STMT_VINFO_RELEVANT_P (stmt_info)
&& !STMT_VINFO_LIVE_P (stmt_info))
{
if (STMT_VINFO_IN_PATTERN_P (stmt_info)
&& (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
&& (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
|| STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
{
stmt = pattern_stmt;
stmt_info = vinfo_for_stmt (stmt);
}
else
{
gsi_next (&si);
continue;
}
}
else if (STMT_VINFO_IN_PATTERN_P (stmt_info)
&& (pattern_stmt = STMT_VINFO_RELATED_STMT (stmt_info))
&& (STMT_VINFO_RELEVANT_P (vinfo_for_stmt (pattern_stmt))
|| STMT_VINFO_LIVE_P (vinfo_for_stmt (pattern_stmt))))
transform_pattern_stmt = true;
/* If pattern statement has def stmts, vectorize them too. */
if (is_pattern_stmt_p (stmt_info))
{
if (pattern_def_seq == NULL)
{
pattern_def_seq = STMT_VINFO_PATTERN_DEF_SEQ (stmt_info);
pattern_def_si = gsi_start (pattern_def_seq);
}
else if (!gsi_end_p (pattern_def_si))
gsi_next (&pattern_def_si);
if (pattern_def_seq != NULL)
{
gimple *pattern_def_stmt = NULL;
stmt_vec_info pattern_def_stmt_info = NULL;
while (!gsi_end_p (pattern_def_si))
{
pattern_def_stmt = gsi_stmt (pattern_def_si);
pattern_def_stmt_info
= vinfo_for_stmt (pattern_def_stmt);
if (STMT_VINFO_RELEVANT_P (pattern_def_stmt_info)
|| STMT_VINFO_LIVE_P (pattern_def_stmt_info))
break;
gsi_next (&pattern_def_si);
}
if (!gsi_end_p (pattern_def_si))
{
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"==> vectorizing pattern def "
"stmt: ");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM,
pattern_def_stmt, 0);
}
stmt = pattern_def_stmt;
stmt_info = pattern_def_stmt_info;
}
else
{
pattern_def_si = gsi_none ();
transform_pattern_stmt = false;
}
}
else
transform_pattern_stmt = false;
}
if (STMT_VINFO_VECTYPE (stmt_info))
{
poly_uint64 nunits
= TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
if (!STMT_SLP_TYPE (stmt_info)
&& maybe_ne (nunits, vf)
&& dump_enabled_p ())
/* For SLP, VF is set according to the unrolling factor, not the
vector size, hence this print is not valid for SLP. */
dump_printf_loc (MSG_NOTE, vect_location, "multiple-types.\n");
}
/* SLP. Schedule all the SLP instances when the first SLP stmt is
reached. */
if (STMT_SLP_TYPE (stmt_info))
{
if (!slp_scheduled)
{
slp_scheduled = true;
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"=== scheduling SLP instances ===\n");
vect_schedule_slp (loop_vinfo);
}
/* Hybrid SLP stmts must be vectorized in addition to SLP. */
if (!vinfo_for_stmt (stmt) || PURE_SLP_STMT (stmt_info))
{
if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
{
pattern_def_seq = NULL;
gsi_next (&si);
}
continue;
}
}
/* -------- vectorize statement ------------ */
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location, "transform statement.\n");
grouped_store = false;
is_store = vect_transform_stmt (stmt, &si, &grouped_store, NULL, NULL);
if (is_store)
{
if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
{
/* Interleaving. If IS_STORE is TRUE, the vectorization of the
interleaving chain was completed - free all the stores in
the chain. */
gsi_next (&si);
vect_remove_stores (GROUP_FIRST_ELEMENT (stmt_info));
}
else
{
/* Free the attached stmt_vec_info and remove the stmt. */
gimple *store = gsi_stmt (si);
free_stmt_vec_info (store);
unlink_stmt_vdef (store);
gsi_remove (&si, true);
release_defs (store);
}
/* Stores can only appear at the end of pattern statements. */
gcc_assert (!transform_pattern_stmt);
pattern_def_seq = NULL;
}
else if (!transform_pattern_stmt && gsi_end_p (pattern_def_si))
{
pattern_def_seq = NULL;
gsi_next (&si);
}
} /* stmts in BB */
/* Stub out scalar statements that must not survive vectorization.
Doing this here helps with grouped statements, or statements that
are involved in patterns. */
for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
!gsi_end_p (gsi); gsi_next (&gsi))
{
gcall *call = dyn_cast <gcall *> (gsi_stmt (gsi));
if (call && gimple_call_internal_p (call, IFN_MASK_LOAD))
{
tree lhs = gimple_get_lhs (call);
if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
{
tree zero = build_zero_cst (TREE_TYPE (lhs));
gimple *new_stmt = gimple_build_assign (lhs, zero);
gsi_replace (&gsi, new_stmt, true);
}
}
}
} /* BBs in loop */
/* The vectorization factor is always > 1, so if we use an IV increment
of 1, a zero NITERS becomes a nonzero NITERS_VECTOR. */
if (integer_onep (step_vector))
niters_no_overflow = true;
vect_set_loop_condition (loop, loop_vinfo, niters_vector, step_vector,
niters_vector_mult_vf, !niters_no_overflow);
unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
scale_profile_for_vect_loop (loop, assumed_vf);
/* True if the final iteration might not handle a full vector's
worth of scalar iterations. */
bool final_iter_may_be_partial = LOOP_VINFO_FULLY_MASKED_P (loop_vinfo);
/* The minimum number of iterations performed by the epilogue. This
is 1 when peeling for gaps because we always need a final scalar
iteration. */
int min_epilogue_iters = LOOP_VINFO_PEELING_FOR_GAPS (loop_vinfo) ? 1 : 0;
/* +1 to convert latch counts to loop iteration counts,
-min_epilogue_iters to remove iterations that cannot be performed
by the vector code. */
int bias_for_lowest = 1 - min_epilogue_iters;
int bias_for_assumed = bias_for_lowest;
int alignment_npeels = LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo);
if (alignment_npeels && LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
{
/* When the amount of peeling is known at compile time, the first
iteration will have exactly alignment_npeels active elements.
In the worst case it will have at least one. */
int min_first_active = (alignment_npeels > 0 ? alignment_npeels : 1);
bias_for_lowest += lowest_vf - min_first_active;
bias_for_assumed += assumed_vf - min_first_active;
}
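/* Editorial example: with lowest_vf = 4 and no peeling for gaps
(min_epilogue_iters = 0, bias_for_lowest = 1), a latch-count upper
bound B becomes floor ((B + 1) / 4) - 1 in the updates below. */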
/* In these calculations the "- 1" converts loop iteration counts
back to latch counts. */
if (loop->any_upper_bound)
loop->nb_iterations_upper_bound
= (final_iter_may_be_partial
? wi::udiv_ceil (loop->nb_iterations_upper_bound + bias_for_lowest,
lowest_vf) - 1
: wi::udiv_floor (loop->nb_iterations_upper_bound + bias_for_lowest,
lowest_vf) - 1);
if (loop->any_likely_upper_bound)
loop->nb_iterations_likely_upper_bound
= (final_iter_may_be_partial
? wi::udiv_ceil (loop->nb_iterations_likely_upper_bound
+ bias_for_lowest, lowest_vf) - 1
: wi::udiv_floor (loop->nb_iterations_likely_upper_bound
+ bias_for_lowest, lowest_vf) - 1);
if (loop->any_estimate)
loop->nb_iterations_estimate
= (final_iter_may_be_partial
? wi::udiv_ceil (loop->nb_iterations_estimate + bias_for_assumed,
assumed_vf) - 1
: wi::udiv_floor (loop->nb_iterations_estimate + bias_for_assumed,
assumed_vf) - 1);
if (dump_enabled_p ())
{
if (!LOOP_VINFO_EPILOGUE_P (loop_vinfo))
{
dump_printf_loc (MSG_NOTE, vect_location,
"LOOP VECTORIZED\n");
if (loop->inner)
dump_printf_loc (MSG_NOTE, vect_location,
"OUTER LOOP VECTORIZED\n");
dump_printf (MSG_NOTE, "\n");
}
else
{
dump_printf_loc (MSG_NOTE, vect_location,
"LOOP EPILOGUE VECTORIZED (VS=");
dump_dec (MSG_NOTE, current_vector_size);
dump_printf (MSG_NOTE, ")\n");
}
}
/* Free SLP instances here because otherwise stmt reference counting
won't work. */
slp_instance instance;
FOR_EACH_VEC_ELT (LOOP_VINFO_SLP_INSTANCES (loop_vinfo), i, instance)
vect_free_slp_instance (instance);
LOOP_VINFO_SLP_INSTANCES (loop_vinfo).release ();
/* Clear the safelen field since its value is invalid after vectorization:
the vectorized loop can have loop-carried dependencies. */
loop->safelen = 0;
/* Don't vectorize epilogue for epilogue. */
if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
epilogue = NULL;
if (!PARAM_VALUE (PARAM_VECT_EPILOGUES_NOMASK))
epilogue = NULL;
if (epilogue)
{
auto_vector_sizes vector_sizes;
targetm.vectorize.autovectorize_vector_sizes (&vector_sizes);
unsigned int next_size = 0;
if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
&& LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) >= 0
&& known_eq (vf, lowest_vf))
{
unsigned int eiters
= (LOOP_VINFO_INT_NITERS (loop_vinfo)
- LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo));
eiters = eiters % lowest_vf;
epilogue->nb_iterations_upper_bound = eiters - 1;
unsigned int ratio;
while (next_size < vector_sizes.length ()
&& !(constant_multiple_p (current_vector_size,
vector_sizes[next_size], &ratio)
&& eiters >= lowest_vf / ratio))
next_size += 1;
}
else
while (next_size < vector_sizes.length ()
&& maybe_lt (current_vector_size, vector_sizes[next_size]))
next_size += 1;
if (next_size == vector_sizes.length ())
epilogue = NULL;
}
if (epilogue)
{
epilogue->force_vectorize = loop->force_vectorize;
epilogue->safelen = loop->safelen;
epilogue->dont_vectorize = false;
/* We may need to if-convert epilogue to vectorize it. */
if (LOOP_VINFO_SCALAR_LOOP (loop_vinfo))
tree_if_conversion (epilogue);
}
return epilogue;
}
/* The code below performs a simple optimization: it reverts
if-conversion for masked stores, i.e. if the mask of a store is zero,
it skips the store and, where possible, the producers of the stored
values as well.
For example,
for (i=0; i<n; i++)
if (c[i])
{
p1[i] += 1;
p2[i] = p3[i] +2;
}
this transformation will produce the following semi-hammock:
if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
{
vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
vect__12.22_172 = vect__11.19_170 + vect_cst__171;
MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
vect__19.28_184 = vect__18.25_182 + vect_cst__183;
MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
}
*/
void
optimize_mask_stores (struct loop *loop)
{
basic_block *bbs = get_loop_body (loop);
unsigned nbbs = loop->num_nodes;
unsigned i;
basic_block bb;
struct loop *bb_loop;
gimple_stmt_iterator gsi;
gimple *stmt;
auto_vec<gimple *> worklist;
vect_location = find_loop_location (loop);
/* Pick up all masked stores in loop if any. */
for (i = 0; i < nbbs; i++)
{
bb = bbs[i];
for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
gsi_next (&gsi))
{
stmt = gsi_stmt (gsi);
if (gimple_call_internal_p (stmt, IFN_MASK_STORE))
worklist.safe_push (stmt);
}
}
free (bbs);
if (worklist.is_empty ())
return;
/* Loop has masked stores. */
while (!worklist.is_empty ())
{
gimple *last, *last_store;
edge e, efalse;
tree mask;
basic_block store_bb, join_bb;
gimple_stmt_iterator gsi_to;
tree vdef, new_vdef;
gphi *phi;
tree vectype;
tree zero;
last = worklist.pop ();
mask = gimple_call_arg (last, 2);
bb = gimple_bb (last);
/* Create then_bb and the if-then structure in the CFG; then_bb belongs
to the same loop as if_bb. That loop can differ from LOOP when a
two-level loop nest is vectorized and the mask_store belongs to the
inner one. */
e = split_block (bb, last);
bb_loop = bb->loop_father;
gcc_assert (loop == bb_loop || flow_loop_nested_p (loop, bb_loop));
join_bb = e->dest;
store_bb = create_empty_bb (bb);
add_bb_to_loop (store_bb, bb_loop);
e->flags = EDGE_TRUE_VALUE;
efalse = make_edge (bb, store_bb, EDGE_FALSE_VALUE);
/* Mark the edge into STORE_BB as unlikely. */
efalse->probability = profile_probability::unlikely ();
store_bb->count = efalse->count ();
make_single_succ_edge (store_bb, join_bb, EDGE_FALLTHRU);
if (dom_info_available_p (CDI_DOMINATORS))
set_immediate_dominator (CDI_DOMINATORS, store_bb, bb);
if (dump_enabled_p ())
dump_printf_loc (MSG_NOTE, vect_location,
"Create new block %d to sink mask stores.",
store_bb->index);
/* Create vector comparison with boolean result. */
vectype = TREE_TYPE (mask);
zero = build_zero_cst (vectype);
stmt = gimple_build_cond (EQ_EXPR, mask, zero, NULL_TREE, NULL_TREE);
gsi = gsi_last_bb (bb);
gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
/* Create new PHI node for vdef of the last masked store:
.MEM_2 = VDEF <.MEM_1>
will be converted to
.MEM.3 = VDEF <.MEM_1>
and new PHI node will be created in join bb
.MEM_2 = PHI <.MEM_1, .MEM_3>
*/
vdef = gimple_vdef (last);
new_vdef = make_ssa_name (gimple_vop (cfun), last);
gimple_set_vdef (last, new_vdef);
phi = create_phi_node (vdef, join_bb);
add_phi_arg (phi, new_vdef, EDGE_SUCC (store_bb, 0), UNKNOWN_LOCATION);
/* Put all masked stores with the same mask to STORE_BB if possible. */
while (true)
{
gimple_stmt_iterator gsi_from;
gimple *stmt1 = NULL;
/* Move masked store to STORE_BB. */
last_store = last;
gsi = gsi_for_stmt (last);
gsi_from = gsi;
/* Shift GSI to the previous stmt for further traversal. */
gsi_prev (&gsi);
gsi_to = gsi_start_bb (store_bb);
gsi_move_before (&gsi_from, &gsi_to);
/* Set GSI_TO to the start of the now non-empty block. */
gsi_to = gsi_start_bb (store_bb);
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Move stmt to created bb\n");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, last, 0);
}
/* Move all stored value producers if possible. */
while (!gsi_end_p (gsi))
{
tree lhs;
imm_use_iterator imm_iter;
use_operand_p use_p;
bool res;
/* Skip debug statements. */
if (is_gimple_debug (gsi_stmt (gsi)))
{
gsi_prev (&gsi);
continue;
}
stmt1 = gsi_stmt (gsi);
/* Do not consider statements writing to memory or having
volatile operand. */
if (gimple_vdef (stmt1)
|| gimple_has_volatile_ops (stmt1))
break;
gsi_from = gsi;
gsi_prev (&gsi);
lhs = gimple_get_lhs (stmt1);
if (!lhs)
break;
/* LHS of vectorized stmt must be SSA_NAME. */
if (TREE_CODE (lhs) != SSA_NAME)
break;
if (!VECTOR_TYPE_P (TREE_TYPE (lhs)))
{
/* Remove dead scalar statement. */
if (has_zero_uses (lhs))
{
gsi_remove (&gsi_from, true);
continue;
}
}
/* Check that LHS does not have uses outside of STORE_BB. */
res = true;
FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
{
gimple *use_stmt;
use_stmt = USE_STMT (use_p);
if (is_gimple_debug (use_stmt))
continue;
if (gimple_bb (use_stmt) != store_bb)
{
res = false;
break;
}
}
if (!res)
break;
if (gimple_vuse (stmt1)
&& gimple_vuse (stmt1) != gimple_vuse (last_store))
break;
/* Can move STMT1 to STORE_BB. */
if (dump_enabled_p ())
{
dump_printf_loc (MSG_NOTE, vect_location,
"Move stmt to created bb\n");
dump_gimple_stmt (MSG_NOTE, TDF_SLIM, stmt1, 0);
}
gsi_move_before (&gsi_from, &gsi_to);
/* Shift GSI_TO for further insertion. */
gsi_prev (&gsi_to);
}
/* Put other masked stores with the same mask to STORE_BB. */
if (worklist.is_empty ()
|| gimple_call_arg (worklist.last (), 2) != mask
|| worklist.last () != stmt1)
break;
last = worklist.pop ();
}
add_phi_arg (phi, gimple_vuse (last_store), e, UNKNOWN_LOCATION);
}
}
|
GB_unop__identity_int64_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_int32)
// op(A') function: GB (_unop_tran__identity_int64_int32)
// C type: int64_t
// A type: int32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_INT32)
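// Editorial note: when, e.g., GxB_NO_IDENTITY is defined (via GB_control.h),
// GB_DISABLE is nonzero, so both kernels below return GrB_NO_VALUE and the
// caller falls back to the generic (non-specialized) apply path.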
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int64_int32)
(
int64_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int32_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int64_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
omp_dem_search.h | //
// Project Name: Kratos
// Last Modified by: $Author: clabra $
// Date: $Date: 2007-03-29 19:37:47 $
// Revision: $Revision: 1.2 $
//
//
#if !defined(KRATOS_OMP_DEM_SEARCH_H_INCLUDED )
#define KRATOS_OMP_DEM_SEARCH_H_INCLUDED
// System includes
#include <string>
#include <iostream>
// include kratos definitions
#include "includes/define.h"
// Project includes
#include "spatial_containers/dem_search.h"
#include "utilities/openmp_utils.h"
// Configures
#include "discrete_particle_configure.h"
#include "geometrical_object_configure.h"
#include "node_configure.h"
// Search
#include "spatial_containers/bins_dynamic_objects.h"
#include "spatial_containers/bins_dynamic.h"
#include "custom_search/bins_dynamic_objects_periodic.h"
// External includes
/* Timer defines */
#include "utilities/timer.h"
#ifdef CUSTOMTIMER
#define KRATOS_TIMER_START(t) Timer::Start(t);
#define KRATOS_TIMER_STOP(t) Timer::Stop(t);
#else
#define KRATOS_TIMER_START(t)
#define KRATOS_TIMER_STOP(t)
#endif
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/** Detail class definition.
*/
class OMP_DEMSearch : public DEMSearch<OMP_DEMSearch>
{
public:
///@name Type Definitions
///@{
/// Pointer definition of OMP_DEMSearch
KRATOS_CLASS_POINTER_DEFINITION(OMP_DEMSearch);
typedef PointType* PtrPointType;
typedef std::vector<PtrPointType>* PointVector;
typedef std::vector<PtrPointType>::iterator PointIterator;
typedef double* DistanceVector;
typedef double* DistanceIterator;
//Configure Types
typedef DiscreteParticleConfigure<3> ElementConfigureType; //Element
typedef NodeConfigure<3> NodeConfigureType; //Node
typedef GeometricalConfigure<3> GeometricalConfigureType; //Generic Geometry
//Bin Types
typedef BinsObjectDynamic<ElementConfigureType> BinsType;
typedef BinsObjectDynamicPeriodic<ElementConfigureType> BinsTypePeriodic;
typedef std::unique_ptr<BinsType> BinsUniquePointerType;
typedef BinsObjectDynamic<NodeConfigureType> NodeBinsType;
typedef BinsObjectDynamicPeriodic<NodeConfigureType> NodeBinsTypePeriodic;
typedef std::unique_ptr<NodeBinsType> NodeBinsUniquePointerType;
typedef BinsObjectDynamic<GeometricalConfigureType> GeometricalBinsType;
//GeometricalObject
typedef PointerVectorSet<GeometricalObject, IndexedObject> GeometricalObjectType;
///@}
///@name Life Cycle
///@{
/// Default constructor.
OMP_DEMSearch(const double domain_min_x = 0.0, const double domain_min_y = 0.0, const double domain_min_z = 0.0,
const double domain_max_x = -1.0, const double domain_max_y = -1.0, const double domain_max_z = -1.0)
{
mDomainPeriodicity = (domain_min_x <= domain_max_x);
}
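// Editorial note: the default arguments form an inverted box
// (max = -1 < min = 0 on x), so mDomainPeriodicity is false; passing a
// valid bounding box (domain_min_x <= domain_max_x) enables the
// periodic bins variants declared above.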
/// Destructor.
~OMP_DEMSearch(){
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
void SearchElementsInRadiusExclusiveImplementation (
ElementsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultElementsContainerType& rResults,
VectorDistanceType& rResultsDistance )
{
// KRATOS_TRY
//
// int MaxNumberOfElements = rStructureElements.size();
//
// ElementsContainerType::ContainerType& elements_bins = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
// ElementsContainerType::ContainerType& elements_sear = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
//
// GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
// GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;
//
// BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());
// SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
//
// for (ElementsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
// BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
//
// for (ElementsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
// SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
//
// GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());
//
// #pragma omp parallel
// {
// GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
// DistanceType localResultsDistances(MaxNumberOfElements);
// std::size_t NumberOfResults = 0;
//
// #pragma omp for
// for (std::size_t i = 0; i < elements_sear.size(); ++i)
// {
// GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
// DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
//
// NumberOfResults = bins.SearchObjectsInRadiusExclusive(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
//
// rResults[i].reserve(NumberOfResults);
//
// for (GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
// {
// Element::Pointer elem = dynamic_pointer_cast<Element>(*it);
// rResults[i].push_back(elem);
// rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
// }
// }
// }
//
// KRATOS_CATCH("")
KRATOS_TRY
int MaxNumberOfElements = rStructureElements.size();
ElementsContainerType::ContainerType& elements_array = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
ElementsContainerType::ContainerType& elements_ModelPart = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
BinsUniquePointerType p_bins = GetBins(elements_ModelPart);
#pragma omp parallel
{
ResultElementsContainerType localResults(MaxNumberOfElements);
DistanceType localResultsDistances(MaxNumberOfElements);
std::size_t NumberOfResults = 0;
#pragma omp for schedule(dynamic, 100) //schedule(guided)
for (int i = 0; i < static_cast<int>(elements_array.size()); ++i){
ResultElementsContainerType::iterator ResultsPointer = localResults.begin();
DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
SphericParticle* p_particle = dynamic_cast<SphericParticle*>(&*elements_array[i]);
const double radius = p_particle->GetSearchRadius();
NumberOfResults = p_bins->SearchObjectsInRadiusExclusive(elements_array[i],radius,ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
}
}
//MAJOR TODO: creating and destroying (when leaving the function) this BINS is not parallel and takes a significant time if we search at every time step. Can we re-use a bins and avoid allocation and deallocation?? MA
KRATOS_CATCH("")
}
void SearchElementsInRadiusInclusiveImplementation (
ElementsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType& Radius,
VectorResultElementsContainerType& rResults,
VectorDistanceType& rResultsDistance )
{
KRATOS_TRY
int MaxNumberOfElements = rStructureElements.size();
ElementsContainerType::ContainerType& elements_array = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
ElementsContainerType::ContainerType& elements_ModelPart = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
BinsUniquePointerType p_bins = GetBins(elements_ModelPart);
#pragma omp parallel
{
ResultElementsContainerType localResults(MaxNumberOfElements);
DistanceType localResultsDistances(MaxNumberOfElements);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(elements_array.size()); ++i){
ResultElementsContainerType::iterator ResultsPointer = localResults.begin();
DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
SphericParticle* p_particle = dynamic_cast<SphericParticle*>(&*elements_array[i]);
const double radius = p_particle->GetSearchRadius();
NumberOfResults = p_bins->SearchObjectsInRadius(elements_array[i],radius,ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchElementsInRadiusExclusiveImplementation (
ElementsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultElementsContainerType& rResults )
{
KRATOS_TRY
int MaxNumberOfElements = rStructureElements.size();
ElementsContainerType::ContainerType& elements_array = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
ElementsContainerType::ContainerType& elements_ModelPart = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
BinsUniquePointerType p_bins = GetBins(elements_ModelPart);
#pragma omp parallel
{
ResultElementsContainerType localResults(MaxNumberOfElements);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(elements_array.size()); ++i){
ResultElementsContainerType::iterator ResultsPointer = localResults.begin();
SphericParticle* p_particle = dynamic_cast<SphericParticle*>(&*elements_array[i]);
const double radius = p_particle->GetSearchRadius();
NumberOfResults = p_bins->SearchObjectsInRadiusExclusive(elements_array[i],radius,ResultsPointer,MaxNumberOfElements);
rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchElementsInRadiusInclusiveImplementation (
ElementsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultElementsContainerType& rResults )
{
KRATOS_TRY
int MaxNumberOfElements = rStructureElements.size();
ElementsContainerType::ContainerType& elements_array = const_cast<ElementsContainerType::ContainerType&>(rElements.GetContainer());
ElementsContainerType::ContainerType& elements_ModelPart = const_cast<ElementsContainerType::ContainerType&>(rStructureElements.GetContainer());
BinsType bins(elements_ModelPart.begin(), elements_ModelPart.end());
#pragma omp parallel
{
ResultElementsContainerType localResults(MaxNumberOfElements);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(elements_array.size()); ++i){
ResultElementsContainerType::iterator ResultsPointer = localResults.begin();
SphericParticle* p_particle = dynamic_cast<SphericParticle*>(&*elements_array[i]);
const double radius = p_particle->GetSearchRadius();
NumberOfResults = bins.SearchObjectsInRadius(elements_array[i],radius,ResultsPointer,MaxNumberOfElements);
rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchNodesInRadiusExclusiveImplementation (
NodesContainerType const& rStructureNodes,
NodesContainerType const& rNodes,
const RadiusArrayType & Radius,
VectorResultNodesContainerType& rResults,
VectorDistanceType& rResultsDistance )
{
KRATOS_TRY
int MaxNumberOfNodes = rNodes.size();
NodesContainerType::ContainerType& nodes_ModelPart = const_cast<NodesContainerType::ContainerType&>(rNodes.GetContainer());
NodesContainerType::ContainerType& nodes_array = const_cast<NodesContainerType::ContainerType&>(rStructureNodes.GetContainer());
// NodeBinsType bins(nodes_ModelPart.begin(), nodes_ModelPart.end());
NodeBinsUniquePointerType p_bins = GetBins(nodes_ModelPart);
#pragma omp parallel
{
ResultNodesContainerType localResults(MaxNumberOfNodes);
DistanceType localResultsDistances(MaxNumberOfNodes);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(nodes_array.size()); ++i){
ResultNodesContainerType::iterator ResultsPointer = localResults.begin();
DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
NumberOfResults = p_bins->SearchObjectsInRadiusExclusive(nodes_array[i], Radius[i], ResultsPointer, ResultsDistancesPointer, MaxNumberOfNodes);
rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchNodesInRadiusInclusiveImplementation (
NodesContainerType const& rStructureNodes,
NodesContainerType const& rNodes,
const RadiusArrayType & Radius,
VectorResultNodesContainerType& rResults,
VectorDistanceType& rResultsDistance )
{
KRATOS_TRY
int MaxNumberOfNodes = rStructureNodes.size();
NodesContainerType::ContainerType& nodes_array = const_cast<NodesContainerType::ContainerType&>(rNodes.GetContainer());
NodesContainerType::ContainerType& nodes_ModelPart = const_cast<NodesContainerType::ContainerType&>(rStructureNodes.GetContainer());
NodeBinsType bins(nodes_ModelPart.begin(), nodes_ModelPart.end());
#pragma omp parallel
{
ResultNodesContainerType localResults(MaxNumberOfNodes);
DistanceType localResultsDistances(MaxNumberOfNodes);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(nodes_array.size()); ++i){
ResultNodesContainerType::iterator ResultsPointer = localResults.begin();
DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
NumberOfResults = bins.SearchObjectsInRadius(nodes_array[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfNodes);
rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchNodesInRadiusExclusiveImplementation (
NodesContainerType const& rStructureNodes,
NodesContainerType const& rNodes,
const RadiusArrayType & Radius,
VectorResultNodesContainerType& rResults )
{
KRATOS_TRY
int MaxNumberOfNodes = rStructureNodes.size();
NodesContainerType::ContainerType& nodes_array = const_cast<NodesContainerType::ContainerType&>(rNodes.GetContainer());
NodesContainerType::ContainerType& nodes_ModelPart = const_cast<NodesContainerType::ContainerType&>(rStructureNodes.GetContainer());
NodeBinsType bins(nodes_ModelPart.begin(), nodes_ModelPart.end());
#pragma omp parallel
{
ResultNodesContainerType localResults(MaxNumberOfNodes);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(nodes_array.size()); ++i){
ResultNodesContainerType::iterator ResultsPointer = localResults.begin();
NumberOfResults = bins.SearchObjectsInRadiusExclusive(nodes_array[i],Radius[i],ResultsPointer,MaxNumberOfNodes);
rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchNodesInRadiusInclusiveImplementation (
NodesContainerType const& rStructureNodes,
NodesContainerType const& rNodes,
const RadiusArrayType & Radius,
VectorResultNodesContainerType& rResults )
{
KRATOS_TRY
int MaxNumberOfNodes = rStructureNodes.size();
NodesContainerType::ContainerType& nodes_array = const_cast<NodesContainerType::ContainerType&>(rNodes.GetContainer());
NodesContainerType::ContainerType& nodes_ModelPart = const_cast<NodesContainerType::ContainerType&>(rStructureNodes.GetContainer());
NodeBinsType bins(nodes_ModelPart.begin(), nodes_ModelPart.end());
#pragma omp parallel
{
ResultNodesContainerType localResults(MaxNumberOfNodes);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(nodes_array.size()); ++i){
ResultNodesContainerType::iterator ResultsPointer = localResults.begin();
NumberOfResults = bins.SearchObjectsInRadius(nodes_array[i],Radius[i],ResultsPointer,MaxNumberOfNodes);
rResults[i].insert(rResults[i].begin(),localResults.begin(),localResults.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchGeometricalInRadiusExclusiveImplementation (
ElementsContainerType const& rStructureElements,
ConditionsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultConditionsContainerType& rResults,
VectorDistanceType& rResultsDistance )
{
KRATOS_TRY
int MaxNumberOfElements = rStructureElements.size();
ElementsContainerType::ContainerType& elements_bins = const_cast<ElementsContainerType::ContainerType&> (rStructureElements.GetContainer());
ConditionsContainerType::ContainerType& elements_sear = const_cast<ConditionsContainerType::ContainerType&>(rElements.GetContainer());
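// Elements and conditions are repackaged as GeometricalObject pointers so
// that a single bins implementation can index both; only the (shared)
// pointers are copied, but the temporaries must outlive the parallel
// search below.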
GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;
SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());
for (ElementsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
for (ConditionsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());
#pragma omp parallel
{
GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
DistanceType localResultsDistances(MaxNumberOfElements);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(elements_sear.size()); ++i){
GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
NumberOfResults = bins.SearchObjectsInRadiusExclusive(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
rResults[i].reserve(NumberOfResults);
for (GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
{
Condition::Pointer elem = dynamic_pointer_cast<Condition>(*it);
rResults[i].push_back(elem);
}
// Distances are appended once per query, not once per result.
rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchGeometricalInRadiusInclusiveImplementation (
ElementsContainerType const& rStructureElements,
ConditionsContainerType const& rElements,
const RadiusArrayType& Radius,
VectorResultConditionsContainerType& rResults,
VectorDistanceType& rResultsDistance )
{
KRATOS_TRY
int MaxNumberOfElements = rStructureElements.size();
ElementsContainerType::ContainerType& elements_bins = const_cast<ElementsContainerType::ContainerType&> (rStructureElements.GetContainer());
ConditionsContainerType::ContainerType& elements_sear = const_cast<ConditionsContainerType::ContainerType&>(rElements.GetContainer());
GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;
SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());
for (ElementsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
for (ConditionsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());
#pragma omp parallel
{
GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
DistanceType localResultsDistances(MaxNumberOfElements);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(elements_sear.size()); ++i){
GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
NumberOfResults = bins.SearchObjectsInRadius(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
rResults[i].reserve(NumberOfResults);
for (GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
{
Condition::Pointer elem = dynamic_pointer_cast<Condition>(*it);
rResults[i].push_back(elem);
}
rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchGeometricalInRadiusExclusiveImplementation (
ConditionsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType & Radius,
VectorResultElementsContainerType& rResults,
VectorDistanceType& rResultsDistance )
{
KRATOS_TRY
int MaxNumberOfElements = rStructureElements.size();
ConditionsContainerType::ContainerType& elements_bins = const_cast<ConditionsContainerType::ContainerType&>(rStructureElements.GetContainer());
ElementsContainerType::ContainerType& elements_sear = const_cast<ElementsContainerType::ContainerType&> (rElements.GetContainer());
GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;
SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());
for (ElementsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
for (ConditionsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());
#pragma omp parallel
{
GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
DistanceType localResultsDistances(MaxNumberOfElements);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(elements_sear.size()); ++i){
GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
NumberOfResults = bins.SearchObjectsInRadiusExclusive(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
rResults[i].reserve(NumberOfResults);
for (GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
{
Element::Pointer elem = dynamic_pointer_cast<Element>(*it);
rResults[i].push_back(elem);
}
rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
void SearchGeometricalInRadiusInclusiveImplementation (
ConditionsContainerType const& rStructureElements,
ElementsContainerType const& rElements,
const RadiusArrayType& Radius,
VectorResultElementsContainerType& rResults,
VectorDistanceType& rResultsDistance )
{
KRATOS_TRY
int MaxNumberOfElements = rStructureElements.size();
ConditionsContainerType::ContainerType& elements_bins = const_cast<ConditionsContainerType::ContainerType&>(rStructureElements.GetContainer());
ElementsContainerType::ContainerType& elements_sear = const_cast<ElementsContainerType::ContainerType&> (rElements.GetContainer());
GeometricalObjectType::ContainerType SearElementPointerToGeometricalObjecPointerTemporalVector;
GeometricalObjectType::ContainerType BinsElementPointerToGeometricalObjecPointerTemporalVector;
SearElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_sear.size());
BinsElementPointerToGeometricalObjecPointerTemporalVector.reserve(elements_bins.size());
for (ElementsContainerType::ContainerType::iterator it = elements_sear.begin(); it != elements_sear.end(); it++)
SearElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
for (ConditionsContainerType::ContainerType::iterator it = elements_bins.begin(); it != elements_bins.end(); it++)
BinsElementPointerToGeometricalObjecPointerTemporalVector.push_back(*it);
GeometricalBinsType bins(BinsElementPointerToGeometricalObjecPointerTemporalVector.begin(), BinsElementPointerToGeometricalObjecPointerTemporalVector.end());
#pragma omp parallel
{
GeometricalObjectType::ContainerType localResults(MaxNumberOfElements);
DistanceType localResultsDistances(MaxNumberOfElements);
std::size_t NumberOfResults = 0;
#pragma omp for
for (int i = 0; i < static_cast<int>(elements_sear.size()); ++i){
GeometricalObjectType::ContainerType::iterator ResultsPointer = localResults.begin();
DistanceType::iterator ResultsDistancesPointer = localResultsDistances.begin();
NumberOfResults = bins.SearchObjectsInRadius(SearElementPointerToGeometricalObjecPointerTemporalVector[i],Radius[i],ResultsPointer,ResultsDistancesPointer,MaxNumberOfElements);
rResults[i].reserve(NumberOfResults);
for (GeometricalObjectType::ContainerType::iterator it = localResults.begin(); it != localResults.begin() + NumberOfResults; it++)
{
Element::Pointer elem = dynamic_pointer_cast<Element>(*it);
rResults[i].push_back(elem);
}
rResultsDistance[i].insert(rResultsDistance[i].begin(),localResultsDistances.begin(),localResultsDistances.begin()+NumberOfResults);
}
}
KRATOS_CATCH("")
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
std::stringstream buffer;
buffer << "OpenMPDemSearch";
return buffer.str();
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override {rOStream << "OpenMPDemSearch";}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override {}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///
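/// Bins factory: with periodic boundaries the neighbor search has to wrap
/// across mDomainMin/mDomainMax, so a periodic bins variant is returned;
/// otherwise a plain bins object over the container is enough.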
BinsUniquePointerType GetBins(ElementsContainerType::ContainerType& r_model_part_container)
{
if (mDomainPeriodicity){
return std::unique_ptr<BinsType>(new BinsTypePeriodic(r_model_part_container.begin(), r_model_part_container.end(), this->mDomainMin, this->mDomainMax));
}
else {
return std::unique_ptr<BinsType>(new BinsType(r_model_part_container.begin(), r_model_part_container.end()));
}
}
NodeBinsUniquePointerType GetBins(NodesContainerType::ContainerType& r_model_part_container)
{
if (mDomainPeriodicity){
return std::unique_ptr<NodeBinsType>(new NodeBinsTypePeriodic(r_model_part_container.begin(), r_model_part_container.end(), this->mDomainMin, this->mDomainMax));
}
else {
return std::unique_ptr<NodeBinsType>(new NodeBinsType(r_model_part_container.begin(), r_model_part_container.end()));
}
}
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
OMP_DEMSearch& operator=(OMP_DEMSearch const& rOther)
{
return *this;
}
/// Copy constructor.
OMP_DEMSearch(OMP_DEMSearch const& rOther)
{
*this = rOther;
}
///@}
}; // Class DEMSearch
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
// inline std::istream& operator >> (std::istream& rIStream,
// DEMSearch& rThis){return rIStream;}
//
// /// output stream function
// inline std::ostream& operator << (std::ostream& rOStream,
// const DEMSearch& rThis)
// {
// rThis.PrintInfo(rOStream);
// rOStream << std::endl;
// rThis.PrintData(rOStream);
//
// return rOStream;
// }
///@}
///@} addtogroup block
} // namespace Kratos.
#endif // KRATOS_DEM_SEARCH_H_INCLUDED defined
|
irbuilder_unroll_partial_heuristic_runtime_for.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER
double sind(double);
// CHECK-LABEL: define {{.*}}@unroll_partial_heuristic_runtime_for(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[N_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[E_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[OFFSET_ADDR:.+]] = alloca float, align 4
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32 %[[N:.+]], i32* %[[N_ADDR]], align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store float* %[[E:.+]], float** %[[E_ADDR]], align 8
// CHECK-NEXT: store float %[[OFFSET:.+]], float* %[[OFFSET_ADDR]], align 4
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 1
// CHECK-NEXT: store i32* %[[N_ADDR]], i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP2:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[TMP2]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: %[[TMP4:.+]] = udiv i32 %[[DOTCOUNT]], 4
// CHECK-NEXT: %[[TMP5:.+]] = urem i32 %[[DOTCOUNT]], 4
// CHECK-NEXT: %[[TMP6:.+]] = icmp ne i32 %[[TMP5]], 0
// CHECK-NEXT: %[[TMP7:.+]] = zext i1 %[[TMP6]] to i32
// CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP4]], %[[TMP7]]
// CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]:
// CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP8:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1
// CHECK-NEXT: store i32 %[[TMP8]], i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: %[[TMP11:.+]] = sub i32 %[[TMP10]], %[[TMP9]]
// CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[TMP11]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_HEADER]]:
// CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_COND]]:
// CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP12]]
// CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_BODY]]:
// CHECK-NEXT: %[[TMP13:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP9]]
// CHECK-NEXT: %[[TMP14:.+]] = icmp eq i32 %[[TMP13]], %[[OMP_FLOOR0_TRIPCOUNT]]
// CHECK-NEXT: %[[TMP15:.+]] = select i1 %[[TMP14]], i32 %[[TMP5]], i32 4
// CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_HEADER]]:
// CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_COND]]:
// CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP15]]
// CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_BODY]]:
// CHECK-NEXT: %[[TMP16:.+]] = mul nuw i32 4, %[[TMP13]]
// CHECK-NEXT: %[[TMP17:.+]] = add nuw i32 %[[TMP16]], %[[OMP_TILE0_IV]]
// CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP17]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP18:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP19:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP19]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP18]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP20:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[CONV:.+]] = fpext float %[[TMP20]] to double
// CHECK-NEXT: %[[CALL:.+]] = call double @sind(double %[[CONV]])
// CHECK-NEXT: %[[TMP21:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP22:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP22]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP21]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP23:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[CONV4:.+]] = fpext float %[[TMP23]] to double
// CHECK-NEXT: %[[MUL:.+]] = fmul double %[[CALL]], %[[CONV4]]
// CHECK-NEXT: %[[TMP24:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP25:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM5:.+]] = sext i32 %[[TMP25]] to i64
// CHECK-NEXT: %[[ARRAYIDX6:.+]] = getelementptr inbounds float, float* %[[TMP24]], i64 %[[IDXPROM5]]
// CHECK-NEXT: %[[TMP26:.+]] = load float, float* %[[ARRAYIDX6]], align 4
// CHECK-NEXT: %[[CONV7:.+]] = fpext float %[[TMP26]] to double
// CHECK-NEXT: %[[MUL8:.+]] = fmul double %[[MUL]], %[[CONV7]]
// CHECK-NEXT: %[[TMP27:.+]] = load float*, float** %[[E_ADDR]], align 8
// CHECK-NEXT: %[[TMP28:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM9:.+]] = sext i32 %[[TMP28]] to i64
// CHECK-NEXT: %[[ARRAYIDX10:.+]] = getelementptr inbounds float, float* %[[TMP27]], i64 %[[IDXPROM9]]
// CHECK-NEXT: %[[TMP29:.+]] = load float, float* %[[ARRAYIDX10]], align 4
// CHECK-NEXT: %[[CONV11:.+]] = fpext float %[[TMP29]] to double
// CHECK-NEXT: %[[MUL12:.+]] = fmul double %[[MUL8]], %[[CONV11]]
// CHECK-NEXT: %[[TMP30:.+]] = load float, float* %[[OFFSET_ADDR]], align 4
// CHECK-NEXT: %[[CONV13:.+]] = fpext float %[[TMP30]] to double
// CHECK-NEXT: %[[ADD:.+]] = fadd double %[[MUL12]], %[[CONV13]]
// CHECK-NEXT: %[[TMP31:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP32:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM14:.+]] = sext i32 %[[TMP32]] to i64
// CHECK-NEXT: %[[ARRAYIDX15:.+]] = getelementptr inbounds float, float* %[[TMP31]], i64 %[[IDXPROM14]]
// CHECK-NEXT: %[[TMP33:.+]] = load float, float* %[[ARRAYIDX15]], align 4
// CHECK-NEXT: %[[CONV16:.+]] = fpext float %[[TMP33]] to double
// CHECK-NEXT: %[[ADD17:.+]] = fadd double %[[CONV16]], %[[ADD]]
// CHECK-NEXT: %[[CONV18:.+]] = fptrunc double %[[ADD17]] to float
// CHECK-NEXT: store float %[[CONV18]], float* %[[ARRAYIDX15]], align 4
// CHECK-NEXT: br label %[[OMP_TILE0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_INC]]:
// CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_EXIT]]:
// CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_INC]]:
// CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_EXIT]]:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]])
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM19:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM19]])
// CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
void unroll_partial_heuristic_runtime_for(int n, float *a, float *b, float *c, float *d, float *e, float offset) {
#pragma omp for
#pragma omp unroll partial
for (int i = 0; i < n; i++) {
a[i] += sind(b[i]) * c[i] * d[i] * e[i] + offset;
}
}
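// What the autogenerated CHECK lines above encode: with `partial` and no
// explicit factor, the heuristic picks an unroll factor of 4 (see the
// udiv/urem by 4 and the llvm.loop.unroll.count metadata). The loop is
// split into a "floor" loop of ceil(count/4) chunks, which is what
// __kmpc_for_static_init_4u workshares across threads, and an inner
// "tile" loop of at most 4 iterations whose !llvm.loop metadata asks the
// backend to unroll it.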
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 1
// CHECK-NEXT: %[[TMP5:.+]] = load i32*, i32** %[[TMP4]], align 8
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[TMP5]], align 4
// CHECK-NEXT: store i32 %[[TMP6]], i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP7]], %[[TMP8]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP10:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP9]], %[[TMP10]]
// CHECK-NEXT: %[[TMP11:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[SUB]], %[[TMP11]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP12:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP12]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
// CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 4}
|
hessian_screen.c | /* Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <complex.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#include "np_helper/np_helper.h"
int int2e_sph();
int int2e_cart();
int int2e_ipvip1_cart();
int int2e_spsp1spsp2_cart();
int int2e_spsp1spsp2_sph();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
/*
* Gradients screening for grad/rhf.py
*/
// ijkl,lk->ij
// ijkl,jk->il
// ijkl,kl->ij
// ijkl,jl->ik
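/*
 * Cauchy-Schwarz prescreening: |(ij|kl)| <= q_ij * q_kl. Here q for the
 * bra pair comes from the derivative integrals and q for the ket pair
 * from the plain ERIs (the second n*n block of q_cond). A quartet
 * survives only if this bound exceeds direct_scf_cutoff and at least one
 * density block it is contracted with satisfies
 * q_ij * q_kl * |dm| > direct_scf_cutoff; the factor 2 on dm_cond[l,k]
 * presumably gives the Coulomb term twice the weight of exchange.
 */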
int CVHFgrad_jk_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double *q_cond_kl = opt->q_cond + n * n;
double qijkl = opt->q_cond[i*n+j] * q_cond_kl[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((2*opt->dm_cond[l*n+k] > dmin)
|| ( opt->dm_cond[j*n+k] > dmin)
|| ( opt->dm_cond[j*n+l] > dmin));
}
void CVHFgrad_jk_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
nbas = opt->nbas;
// First n*n elements for derivatives, the next n*n elements for regular ERIs
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas*2);
if (ao_loc[nbas] == CINTtot_cgto_spheric(bas, nbas)) {
CVHFset_int2e_q_cond(int2e_sph, NULL, opt->q_cond+nbas*nbas, ao_loc,
atm, natm, bas, nbas, env);
} else {
CVHFset_int2e_q_cond(int2e_cart, NULL, opt->q_cond+nbas*nbas, ao_loc,
atm, natm, bas, nbas, env);
}
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel \
shared(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env)
{
double qtmp;
int ij, i, j, iijj, di, dj, ish, jsh;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
double *buf = malloc(sizeof(double) * 9 * di*di*di*di);
double *bufx = buf;
double *bufy, *bufz;
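// Layout note: a 9-component derivative intor (e.g. int2e_ipvip1_cart,
// declared above) stores its output as a 3x3 grid of di*dj*di*dj blocks;
// the offsets 4*dim and 8*dim below select the diagonal yy and zz blocks
// (bufx is the xx block at offset 0), and only these diagonal
// (ii|ii)-type components enter the max used for q_cond.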
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nbas*nbas; ij++) {
ish = ij / nbas;
jsh = ij - ish * nbas;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
qtmp = 1e-100;
bufy = buf + 4*(di*dj*di*dj);
bufz = buf + 8*(di*dj*di*dj);
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
iijj = i+di*j+di*dj*i+di*dj*di*j;
qtmp = MAX(qtmp, fabs(bufx[iijj]));
qtmp = MAX(qtmp, fabs(bufy[iijj]));
qtmp = MAX(qtmp, fabs(bufz[iijj]));
} }
qtmp = sqrt(qtmp);
}
opt->q_cond[ish*nbas+jsh] = qtmp;
}
free(buf);
free(cache);
}
}
void CVHFgrad_jk_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
if (opt->dm_cond) {
free(opt->dm_cond);
}
nbas = opt->nbas;
opt->dm_cond = (double *)malloc(sizeof(double) * nbas*nbas);
NPdset0(opt->dm_cond, ((size_t)nbas)*nbas);
const size_t nao = ao_loc[nbas];
double dmax;
int i, j, ish, jsh;
int iset;
double *pdm;
for (ish = 0; ish < nbas; ish++) {
for (jsh = 0; jsh < nbas; jsh++) {
dmax = 0;
for (iset = 0; iset < nset; iset++) {
pdm = dm + nao*nao*iset;
for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
dmax = MAX(dmax, fabs(pdm[i*nao+j]));
} }
}
opt->dm_cond[ish*nbas+jsh] = dmax;
} }
}
/*
* Hessian screening for hessian/rhf.py
*/
// ijkl,ji->kl
// ijkl,li->kj
// ijkl,lj->ki
int CVHFip1ip2_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((opt->dm_cond[j*n+i] > dmin)
|| (opt->dm_cond[l*n+i] > dmin)
|| (opt->dm_cond[l*n+j] > dmin));
}
void CVHFip1ip2_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
CVHFgrad_jk_direct_scf(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env);
}
void CVHFip1ip2_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
CVHFgrad_jk_direct_scf_dm(opt, dm, nset, ao_loc, atm, natm, bas, nbas, env);
}
// ijkl,lk->ij
// ijkl,jk->il
// ijkl,kl->ij
// ijkl,jl->ik
int CVHFipip1_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double *q_cond_kl = opt->q_cond + n * n;
double qijkl = opt->q_cond[i*n+j] * q_cond_kl[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((2*opt->dm_cond[l*n+k] > dmin)
|| ( opt->dm_cond[j*n+k] > dmin)
|| ( opt->dm_cond[j*n+l] > dmin));
}
void CVHFipip1_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
if (opt->q_cond) {
free(opt->q_cond);
}
nbas = opt->nbas;
// First n*n elements for derivatives, the next n*n elements for regular ERIs
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas*2);
if (ao_loc[nbas] == CINTtot_cgto_spheric(bas, nbas)) {
CVHFset_int2e_q_cond(int2e_sph, NULL, opt->q_cond+nbas*nbas, ao_loc,
atm, natm, bas, nbas, env);
} else {
CVHFset_int2e_q_cond(int2e_cart, NULL, opt->q_cond+nbas*nbas, ao_loc,
atm, natm, bas, nbas, env);
}
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel \
shared(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env)
{
double qtmp;
int ij, i, j, iijj, di, dj, ish, jsh;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
double *buf = malloc(sizeof(double) * 256 * di*di*di*di);
double *bufxx = buf;
double *bufxy, *bufxz, *bufyx, *bufyy, *bufyz, *bufzx, *bufzy, *bufzz;
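// Layout inferred from the offsets below: the intor yields a 16x16 grid
// of component blocks (stride 16, with x,y,z at sub-indices 4a+b for
// a,b < 3); the nine diagonal blocks (p,p), p in {0,1,2,4,5,6,8,9,10},
// are the xx..zz second-derivative components sampled for q_cond.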
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nbas*nbas; ij++) {
ish = ij / nbas;
jsh = ij - ish * nbas;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
qtmp = 1e-100;
iijj = di * dj * di * dj;
bufxy = buf + ( 1*16+ 1)*iijj;
bufxz = buf + ( 2*16+ 2)*iijj;
bufyx = buf + ( 4*16+ 4)*iijj;
bufyy = buf + ( 5*16+ 5)*iijj;
bufyz = buf + ( 6*16+ 6)*iijj;
bufzx = buf + ( 8*16+ 8)*iijj;
bufzy = buf + ( 9*16+ 9)*iijj;
bufzz = buf + (10*16+10)*iijj;
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
iijj = i+di*j+di*dj*i+di*dj*di*j;
qtmp = MAX(qtmp, fabs(bufxx[iijj]));
qtmp = MAX(qtmp, fabs(bufxy[iijj]));
qtmp = MAX(qtmp, fabs(bufxz[iijj]));
qtmp = MAX(qtmp, fabs(bufyx[iijj]));
qtmp = MAX(qtmp, fabs(bufyy[iijj]));
qtmp = MAX(qtmp, fabs(bufyz[iijj]));
qtmp = MAX(qtmp, fabs(bufzx[iijj]));
qtmp = MAX(qtmp, fabs(bufzy[iijj]));
qtmp = MAX(qtmp, fabs(bufzz[iijj]));
} }
qtmp = sqrt(qtmp);
}
opt->q_cond[ish*nbas+jsh] = qtmp;
}
free(buf);
free(cache);
}
}
void CVHFipip1_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
CVHFgrad_jk_direct_scf_dm(opt, dm, nset, ao_loc, atm, natm, bas, nbas, env);
}
// ijkl,lk->ij
// ijkl,li->kj
// ijkl,kl->ij
// ijkl,ki->lj
int CVHFipvip1_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double *q_cond_kl = opt->q_cond + n * n;
double qijkl = opt->q_cond[i*n+j] * q_cond_kl[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((2*opt->dm_cond[l*n+k] > dmin)
|| ( opt->dm_cond[l*n+i] > dmin)
|| ( opt->dm_cond[k*n+i] > dmin));
}
void CVHFipvip1_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
CVHFipip1_direct_scf(opt, intor, cintopt, ao_loc, atm, natm, bas, nbas, env);
}
void CVHFipvip1_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
CVHFgrad_jk_direct_scf_dm(opt, dm, nset, ao_loc, atm, natm, bas, nbas, env);
}
|
GB_unop__log_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log_fp64_fp64
// op(A') function: GB_unop_tran__log_fp64_fp64
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = log (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = log (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__log_fp64_fp64
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = log (z) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__log_fp64_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
test.c |
#include <stdlib.h>
#include <stdio.h>
#include "omp.h"
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define N 10
int main()
{
double a[N], a_h[N];
double b[N], c[N];
int fail = 0;
check_offloading();
long cpuExec = 0;
#pragma omp target map(tofrom: cpuExec)
{
cpuExec = omp_is_initial_device();
}
// taskloop is only implemented on the gpu
if (!cpuExec) {
// Test: basic with shared
for(int i = 0 ; i < N ; i++) {
a[i] = a_h[i] = 0;
b[i] = i;
c[i] = i-7;
}
#pragma omp target map(tofrom:a) map(to:b,c)
{
#pragma omp parallel
#pragma omp single
#pragma omp taskloop shared(a)
for(int i = 0 ; i < N; i++) {
if(i == N/2) {
#pragma omp taskyield
}
a[i] += b[i] + c[i];
}
}
for(int i = 0 ; i < N; i++)
a_h[i] += b[i] + c[i];
for(int i = 0 ; i < N; i++)
if (a[i] != a_h[i]) {
printf("Error %d: device = %lf, host = %lf\n", i, a[i], a_h[i]);
fail = 1;
}
if (fail)
printf("Failed\n");
else
printf("Succeeded\n");
} else // if !cpuExec
DUMP_SUCCESS(1);
return 0;
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
typedef enum
{
BitwiseAndAssignmentOperator = 0xd9U,
BitwiseOrAssignmentOperator,
LeftShiftAssignmentOperator,
RightShiftAssignmentOperator,
PowerAssignmentOperator,
ModuloAssignmentOperator,
PlusAssignmentOperator,
SubtractAssignmentOperator,
MultiplyAssignmentOperator,
DivideAssignmentOperator,
IncrementAssignmentOperator,
DecrementAssignmentOperator,
LeftShiftOperator,
RightShiftOperator,
LessThanEqualOperator,
GreaterThanEqualOperator,
EqualOperator,
NotEqualOperator,
LogicalAndOperator,
LogicalOrOperator,
ExponentialNotation
} FxOperator;
struct _FxInfo
{
const Image
*images;
char
*expression;
FILE
*file;
SplayTreeInfo
*colors,
*symbols;
CacheView
**view;
RandomInfo
*random_info;
ExceptionInfo
*exception;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireFxInfo() allocates the FxInfo structure.
%
% The format of the AcquireFxInfo method is:
%
% FxInfo *AcquireFxInfo(Image *images,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o expression: the expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate FxInfo *AcquireFxInfo(const Image *images,const char *expression,
ExceptionInfo *exception)
{
const Image
*next;
FxInfo
*fx_info;
ssize_t
i;
unsigned char
fx_op[2];
fx_info=(FxInfo *) AcquireCriticalMemory(sizeof(*fx_info));
(void) memset(fx_info,0,sizeof(*fx_info));
fx_info->exception=AcquireExceptionInfo();
fx_info->images=images;
fx_info->colors=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->symbols=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
RelinquishMagickMemory);
fx_info->view=(CacheView **) AcquireQuantumMemory(GetImageListLength(
fx_info->images),sizeof(*fx_info->view));
if (fx_info->view == (CacheView **) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
i=0;
next=GetFirstImageInList(fx_info->images);
for ( ; next != (Image *) NULL; next=next->next)
{
fx_info->view[i]=AcquireVirtualCacheView(next,exception);
i++;
}
fx_info->random_info=AcquireRandomInfo();
fx_info->expression=ConstantString(expression);
fx_info->file=stderr;
/*
Convert compound to simple operators.
*/
fx_op[1]='\0';
*fx_op=(unsigned char) BitwiseAndAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"&=",(char *) fx_op);
*fx_op=(unsigned char) BitwiseOrAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"|=",(char *) fx_op);
*fx_op=(unsigned char) LeftShiftAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"<<=",(char *) fx_op);
*fx_op=(unsigned char) RightShiftAssignmentOperator;
(void) SubstituteString(&fx_info->expression,">>=",(char *) fx_op);
*fx_op=(unsigned char) PowerAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"^=",(char *) fx_op);
*fx_op=(unsigned char) ModuloAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"%=",(char *) fx_op);
*fx_op=(unsigned char) PlusAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"+=",(char *) fx_op);
*fx_op=(unsigned char) SubtractAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"-=",(char *) fx_op);
*fx_op=(unsigned char) MultiplyAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"*=",(char *) fx_op);
*fx_op=(unsigned char) DivideAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"/=",(char *) fx_op);
*fx_op=(unsigned char) IncrementAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"++",(char *) fx_op);
*fx_op=(unsigned char) DecrementAssignmentOperator;
(void) SubstituteString(&fx_info->expression,"--",(char *) fx_op);
*fx_op=(unsigned char) LeftShiftOperator;
(void) SubstituteString(&fx_info->expression,"<<",(char *) fx_op);
*fx_op=(unsigned char) RightShiftOperator;
(void) SubstituteString(&fx_info->expression,">>",(char *) fx_op);
*fx_op=(unsigned char) LessThanEqualOperator;
(void) SubstituteString(&fx_info->expression,"<=",(char *) fx_op);
*fx_op=(unsigned char) GreaterThanEqualOperator;
(void) SubstituteString(&fx_info->expression,">=",(char *) fx_op);
*fx_op=(unsigned char) EqualOperator;
(void) SubstituteString(&fx_info->expression,"==",(char *) fx_op);
*fx_op=(unsigned char) NotEqualOperator;
(void) SubstituteString(&fx_info->expression,"!=",(char *) fx_op);
*fx_op=(unsigned char) LogicalAndOperator;
(void) SubstituteString(&fx_info->expression,"&&",(char *) fx_op);
*fx_op=(unsigned char) LogicalOrOperator;
(void) SubstituteString(&fx_info->expression,"||",(char *) fx_op);
*fx_op=(unsigned char) ExponentialNotation;
(void) SubstituteString(&fx_info->expression,"**",(char *) fx_op);
/*
Force right-to-left associativity for unary negation.
*/
(void) SubstituteString(&fx_info->expression,"-","-1.0*");
(void) SubstituteString(&fx_info->expression,"^-1.0*","^-");
(void) SubstituteString(&fx_info->expression,"E-1.0*","E-");
(void) SubstituteString(&fx_info->expression,"e-1.0*","e-");
(void) SubstituteString(&fx_info->expression," ",""); /* compact string */
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y F x I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyFxInfo() deallocates memory associated with an FxInfo structure.
%
% The format of the DestroyFxInfo method is:
%
% ImageInfo *DestroyFxInfo(ImageInfo *fx_info)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
*/
MagickPrivate FxInfo *DestroyFxInfo(FxInfo *fx_info)
{
ssize_t
i;
fx_info->exception=DestroyExceptionInfo(fx_info->exception);
fx_info->expression=DestroyString(fx_info->expression);
fx_info->symbols=DestroySplayTree(fx_info->symbols);
fx_info->colors=DestroySplayTree(fx_info->colors);
for (i=(ssize_t) GetImageListLength(fx_info->images)-1; i >= 0; i--)
fx_info->view[i]=DestroyCacheView(fx_info->view[i]);
fx_info->view=(CacheView **) RelinquishMagickMemory(fx_info->view);
fx_info->random_info=DestroyRandomInfo(fx_info->random_info);
fx_info=(FxInfo *) RelinquishMagickMemory(fx_info);
return(fx_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ F x E v a l u a t e C h a n n e l E x p r e s s i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxEvaluateChannelExpression() evaluates an expression and returns the
% results.
%
% The format of the FxEvaluateExpression method is:
%
% double FxEvaluateChannelExpression(FxInfo *fx_info,
% const PixelChannel channel,const ssize_t x,const ssize_t y,
% double *alpha,Exceptioninfo *exception)
% double FxEvaluateExpression(FxInfo *fx_info,
% double *alpha,Exceptioninfo *exception)
%
% A description of each parameter follows:
%
% o fx_info: the fx info.
%
% o channel: the channel.
%
% o x,y: the pixel position.
%
% o alpha: the result.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline const double *GetFxSymbolValue(FxInfo *magick_restrict fx_info,
const char *symbol)
{
return((const double *) GetValueFromSplayTree(fx_info->symbols,symbol));
}
static inline MagickBooleanType SetFxSymbolValue(
FxInfo *magick_restrict fx_info,const char *magick_restrict symbol,
double const value)
{
double
*object;
object=(double *) GetValueFromSplayTree(fx_info->symbols,symbol);
if (object != (double *) NULL)
{
*object=value;
return(MagickTrue);
}
object=(double *) AcquireMagickMemory(sizeof(*object));
if (object == (double *) NULL)
{
(void) ThrowMagickException(fx_info->exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
fx_info->images->filename);
return(MagickFalse);
}
*object=value;
return(AddValueToSplayTree(fx_info->symbols,ConstantString(symbol),object));
}
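/*
  FxChannelStatistics() below memoizes whole-image statistics (depth,
  mean, minima, kurtosis, ...) in the symbols splay-tree, keyed by image
  address, channel and symbol name, so an expression like "p/mean"
  computes each statistic only once per image and channel.
*/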
static double FxChannelStatistics(FxInfo *fx_info,Image *image,
PixelChannel channel,const char *symbol,ExceptionInfo *exception)
{
ChannelType
channel_mask;
char
key[MagickPathExtent];
const double
*value;
double
statistic;
const char
*p;
channel_mask=UndefinedChannel;
for (p=symbol; (*p != '.') && (*p != '\0'); p++) ;
if (*p == '.')
{
ssize_t
option;
option=ParseCommandOption(MagickPixelChannelOptions,MagickTrue,p+1);
if (option >= 0)
{
channel=(PixelChannel) option;
channel_mask=SetPixelChannelMask(image,(ChannelType)
(1UL << channel));
}
}
(void) FormatLocaleString(key,MagickPathExtent,"%p.%.20g.%s",(void *) image,
(double) channel,symbol);
value=GetFxSymbolValue(fx_info,key);
if (value != (const double *) NULL)
{
if (channel_mask != UndefinedChannel)
(void) SetPixelChannelMask(image,channel_mask);
return(QuantumScale*(*value));
}
statistic=0.0;
if (LocaleNCompare(symbol,"depth",5) == 0)
{
size_t
depth;
depth=GetImageDepth(image,exception);
statistic=(double) depth;
}
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
{
double
kurtosis,
skewness;
(void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
statistic=kurtosis;
}
if (LocaleNCompare(symbol,"maxima",6) == 0)
{
double
maxima,
minima;
(void) GetImageRange(image,&minima,&maxima,exception);
statistic=maxima;
}
if (LocaleNCompare(symbol,"mean",4) == 0)
{
double
mean,
standard_deviation;
(void) GetImageMean(image,&mean,&standard_deviation,exception);
statistic=mean;
}
if (LocaleNCompare(symbol,"median",6) == 0)
{
double
median;
(void) GetImageMedian(image,&median,exception);
statistic=median;
}
if (LocaleNCompare(symbol,"minima",6) == 0)
{
double
maxima,
minima;
(void) GetImageRange(image,&minima,&maxima,exception);
statistic=minima;
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
{
double
kurtosis,
skewness;
(void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
statistic=skewness;
}
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
{
double
mean,
standard_deviation;
(void) GetImageMean(image,&mean,&standard_deviation,exception);
statistic=standard_deviation;
}
if (channel_mask != UndefinedChannel)
(void) SetPixelChannelMask(image,channel_mask);
if (SetFxSymbolValue(fx_info,key,statistic) == MagickFalse)
return(0.0);
return(QuantumScale*statistic);
}
static double
FxEvaluateSubexpression(FxInfo *,const PixelChannel,const ssize_t,
const ssize_t,const char *,const size_t,double *,ExceptionInfo *);
static inline MagickBooleanType IsFxFunction(const char *expression,
const char *name,const size_t length)
{
int
c;
size_t
i;
for (i=0; i <= length; i++)
if (expression[i] == '\0')
return(MagickFalse);
c=expression[length];
if ((LocaleNCompare(expression,name,length) == 0) &&
((isspace((int) ((unsigned char) c)) == 0) || (c == '(')))
return(MagickTrue);
return(MagickFalse);
}
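/*
  FxGCD() is Euclid's algorithm on doubles: the recursion replaces
  (alpha, beta) by (beta, alpha mod beta) using floor(), treats values
  below the 0.001 tolerance as zero, and caps the depth so pathological
  inputs cannot recurse forever. For example FxGCD(12.0, 8.0, 0)
  yields 4.0.
*/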
static inline double FxGCD(const double alpha,const double beta,
const size_t depth)
{
#define FxMaxFunctionDepth 200
if (alpha < beta)
return(FxGCD(beta,alpha,depth+1));
if ((fabs(beta) < 0.001) || (depth >= FxMaxFunctionDepth))
return(alpha);
return(FxGCD(beta,alpha-beta*floor(alpha/beta),depth+1));
}
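/*
  Worked example (illustration only): FxGCD(12.0,8.0,0) recurses as
  gcd(12,8) -> gcd(8,4) -> gcd(4,0) and returns 4.0 once fabs(beta) drops
  below the 0.001 tolerance; the depth argument caps the recursion at
  FxMaxFunctionDepth.
*/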
static inline const char *FxSubexpression(const char *expression,
ExceptionInfo *exception)
{
const char
*subexpression;
ssize_t
level;
level=0;
subexpression=expression;
while ((*subexpression != '\0') &&
((level != 1) || (strchr(")",(int) *subexpression) == (char *) NULL)))
{
if (strchr("(",(int) *subexpression) != (char *) NULL)
level++;
else
if (strchr(")",(int) *subexpression) != (char *) NULL)
level--;
subexpression++;
}
if (*subexpression == '\0')
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnbalancedParenthesis","`%s'",expression);
return(subexpression);
}
static double FxGetSymbol(FxInfo *fx_info,const PixelChannel channel,
const ssize_t x,const ssize_t y,const char *expression,const size_t depth,
ExceptionInfo *exception)
{
char
*q,
symbol[MagickPathExtent];
const char
*artifact,
*p;
const double
*value;
double
alpha,
beta;
Image
*image;
MagickBooleanType
status;
PixelInfo
pixel;
PointInfo
point;
ssize_t
i;
size_t
level;
p=expression;
i=GetImageIndexInList(fx_info->images);
level=0;
point.x=(double) x;
point.y=(double) y;
if (isalpha((int) ((unsigned char) *(p+1))) == 0)
{
char
*subexpression;
subexpression=AcquireString(expression);
if (strchr("suv",(int) *p) != (char *) NULL)
{
switch (*p)
{
case 's':
default:
{
i=GetImageIndexInList(fx_info->images);
break;
}
case 'u': i=0; break;
case 'v': i=1; break;
}
p++;
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
i=(ssize_t) alpha;
if (*p != '\0')
p++;
}
if (*p == '.')
p++;
}
if ((*p == 'p') && (isalpha((int) ((unsigned char) *(p+1))) == 0))
{
p++;
if (*p == '{')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '{')
level++;
else
if (*p == '}')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
point.x=alpha;
point.y=beta;
if (*p != '\0')
p++;
}
else
if (*p == '[')
{
level++;
q=subexpression;
for (p++; *p != '\0'; )
{
if (*p == '[')
level++;
else
if (*p == ']')
{
level--;
if (level == 0)
break;
}
*q++=(*p++);
}
*q='\0';
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,
depth,&beta,exception);
point.x+=alpha;
point.y+=beta;
if (*p != '\0')
p++;
}
if (*p == '.')
p++;
}
subexpression=DestroyString(subexpression);
}
image=GetImageFromList(fx_info->images,i);
if (image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"NoSuchImage","`%s'",expression);
return(0.0);
}
i=GetImageIndexInList(image);
GetPixelInfo(image,&pixel);
status=InterpolatePixelInfo(image,fx_info->view[i],image->interpolate,
point.x,point.y,&pixel,exception);
(void) status;
if ((*p != '\0') && (*(p+1) != '\0') && (*(p+2) != '\0') &&
(LocaleCompare(p,"intensity") != 0) && (LocaleCompare(p,"luma") != 0) &&
(LocaleCompare(p,"luminance") != 0) && (LocaleCompare(p,"hue") != 0) &&
(LocaleCompare(p,"saturation") != 0) &&
(LocaleCompare(p,"lightness") != 0))
{
char
name[MagickPathExtent];
size_t
length;
(void) CopyMagickString(name,p,MagickPathExtent);
length=strlen(name);
for (q=name+length-1; q > name; q--)
{
if (*q == ')')
break;
if (*q == '.')
{
*q='\0';
break;
}
}
q=name;
if ((*q != '\0') && (*(q+1) != '\0') && (*(q+2) != '\0') &&
(GetFxSymbolValue(fx_info,name) == (const double *) NULL))
{
PixelInfo
*color;
color=(PixelInfo *) GetValueFromSplayTree(fx_info->colors,name);
if (color != (PixelInfo *) NULL)
{
pixel=(*color);
p+=length;
}
else
{
status=QueryColorCompliance(name,AllCompliance,&pixel,
fx_info->exception);
if (status != MagickFalse)
{
(void) AddValueToSplayTree(fx_info->colors,
ConstantString(name),ClonePixelInfo(&pixel));
p+=length;
}
}
}
}
(void) CopyMagickString(symbol,p,MagickPathExtent);
(void) StripMagickString(symbol);
if (*symbol == '\0')
{
switch (channel)
{
case RedPixelChannel: return(QuantumScale*pixel.red);
case GreenPixelChannel: return(QuantumScale*pixel.green);
case BluePixelChannel: return(QuantumScale*pixel.blue);
case BlackPixelChannel:
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ImageError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.black);
}
case AlphaPixelChannel:
{
if (pixel.alpha_trait == UndefinedPixelTrait)
return(1.0);
alpha=(double) (QuantumScale*pixel.alpha);
return(alpha);
}
case CompositePixelChannel:
{
Quantum
quantum_pixel[MaxPixelChannels];
SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
}
case IndexPixelChannel:
return(0.0);
default:
break;
}
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",p);
return(0.0);
}
switch (*symbol)
{
case 'A':
case 'a':
{
if (LocaleCompare(symbol,"a") == 0)
return((QuantumScale*pixel.alpha));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(symbol,"b") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(symbol,"channel",7) != MagickFalse)
{
GeometryInfo
channel_info;
MagickStatusType
flags;
flags=ParseGeometry(symbol+7,&channel_info);
if (image->colorspace == CMYKColorspace)
switch (channel)
{
case CyanPixelChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case MagentaPixelChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case YellowPixelChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case BlackPixelChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
case AlphaPixelChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
default:
return(0.0);
}
switch (channel)
{
case RedPixelChannel:
{
if ((flags & RhoValue) == 0)
return(0.0);
return(channel_info.rho);
}
case GreenPixelChannel:
{
if ((flags & SigmaValue) == 0)
return(0.0);
return(channel_info.sigma);
}
case BluePixelChannel:
{
if ((flags & XiValue) == 0)
return(0.0);
return(channel_info.xi);
}
case BlackPixelChannel:
{
if ((flags & ChiValue) == 0)
return(0.0);
return(channel_info.chi);
}
case AlphaPixelChannel:
{
if ((flags & PsiValue) == 0)
return(0.0);
return(channel_info.psi);
}
default:
return(0.0);
}
}
if (LocaleCompare(symbol,"c") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'D':
case 'd':
{
if (LocaleNCompare(symbol,"depth",5) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(symbol,"extent") == 0)
{
if (image->extent != 0)
return((double) image->extent);
return((double) GetBlobSize(image));
}
break;
}
case 'G':
case 'g':
{
if (LocaleCompare(symbol,"g") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'K':
case 'k':
{
if (LocaleNCompare(symbol,"kurtosis",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"k") == 0)
{
if (image->colorspace != CMYKColorspace)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ColorSeparatedImageRequired","`%s'",
image->filename);
return(0.0);
}
return(QuantumScale*pixel.black);
}
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(symbol,"h") == 0)
return((double) image->rows);
if (LocaleCompare(symbol,"hue") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(hue);
}
break;
}
case 'I':
case 'i':
{
if ((LocaleCompare(symbol,"image.depth") == 0) ||
(LocaleCompare(symbol,"image.minima") == 0) ||
(LocaleCompare(symbol,"image.maxima") == 0) ||
(LocaleCompare(symbol,"image.mean") == 0) ||
(LocaleCompare(symbol,"image.kurtosis") == 0) ||
(LocaleCompare(symbol,"image.skewness") == 0) ||
(LocaleCompare(symbol,"image.standard_deviation") == 0))
return(FxChannelStatistics(fx_info,image,channel,symbol+6,exception));
if (LocaleCompare(symbol,"image.resolution.x") == 0)
return(image->resolution.x);
if (LocaleCompare(symbol,"image.resolution.y") == 0)
return(image->resolution.y);
if (LocaleCompare(symbol,"intensity") == 0)
{
Quantum
quantum_pixel[MaxPixelChannels];
SetPixelViaPixelInfo(image,&pixel,quantum_pixel);
return(QuantumScale*GetPixelIntensity(image,quantum_pixel));
}
if (LocaleCompare(symbol,"i") == 0)
return((double) x);
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(symbol,"j") == 0)
return((double) y);
break;
}
case 'L':
case 'l':
{
if (LocaleCompare(symbol,"lightness") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(lightness);
}
if (LocaleCompare(symbol,"luma") == 0)
{
double
luma;
luma=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
return(QuantumScale*luma);
}
if (LocaleCompare(symbol,"luminance") == 0)
{
double
        luminance;
      luminance=0.212656*pixel.red+0.715158*pixel.green+0.072186*pixel.blue;
      return(QuantumScale*luminance);
}
break;
}
case 'M':
case 'm':
{
if (LocaleNCompare(symbol,"maxima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"mean",4) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"median",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"minima",6) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleCompare(symbol,"m") == 0)
return(QuantumScale*pixel.green);
break;
}
case 'N':
case 'n':
{
if (LocaleCompare(symbol,"n") == 0)
return((double) GetImageListLength(fx_info->images));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(symbol,"o") == 0)
return(QuantumScale*pixel.alpha);
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(symbol,"page.height") == 0)
return((double) image->page.height);
if (LocaleCompare(symbol,"page.width") == 0)
return((double) image->page.width);
if (LocaleCompare(symbol,"page.x") == 0)
return((double) image->page.x);
if (LocaleCompare(symbol,"page.y") == 0)
return((double) image->page.y);
if (LocaleCompare(symbol,"printsize.x") == 0)
return(PerceptibleReciprocal(image->resolution.x)*image->columns);
if (LocaleCompare(symbol,"printsize.y") == 0)
return(PerceptibleReciprocal(image->resolution.y)*image->rows);
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(symbol,"quality") == 0)
return((double) image->quality);
break;
}
case 'R':
case 'r':
{
if (LocaleCompare(symbol,"resolution.x") == 0)
return(image->resolution.x);
if (LocaleCompare(symbol,"resolution.y") == 0)
return(image->resolution.y);
if (LocaleCompare(symbol,"r") == 0)
return(QuantumScale*pixel.red);
break;
}
case 'S':
case 's':
{
if (LocaleCompare(symbol,"saturation") == 0)
{
double
hue,
lightness,
saturation;
ConvertRGBToHSL(pixel.red,pixel.green,pixel.blue,&hue,&saturation,
&lightness);
return(saturation);
}
if (LocaleNCompare(symbol,"skewness",8) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
if (LocaleNCompare(symbol,"standard_deviation",18) == 0)
return(FxChannelStatistics(fx_info,image,channel,symbol,exception));
break;
}
case 'T':
case 't':
{
if (LocaleCompare(symbol,"t") == 0)
return((double) GetImageIndexInList(fx_info->images));
break;
}
case 'W':
case 'w':
{
if (LocaleCompare(symbol,"w") == 0)
return((double) image->columns);
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(symbol,"y") == 0)
return(QuantumScale*pixel.blue);
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(symbol,"z") == 0)
return((double) GetImageDepth(image,fx_info->exception));
break;
}
default:
break;
}
value=GetFxSymbolValue(fx_info,symbol);
if (value != (const double *) NULL)
return(*value);
artifact=GetImageArtifact(image,symbol);
if (artifact != (const char *) NULL)
return(StringToDouble(artifact,(char **) NULL));
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UndefinedVariable","`%s'",symbol);
(void) SetFxSymbolValue(fx_info,symbol,0.0);
return(0.0);
}
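/*
  Symbol syntax examples (illustration only), as parsed above:

    p[-1,0].r    red channel at offset (-1,0) from the current pixel
    p{12,34}.g   green channel at absolute coordinates (12,34)
    u[2].b       blue channel of the third image in the sequence
    hue          hue of the interpolated pixel at the current position
*/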
static const char *FxOperatorPrecedence(const char *expression,
ExceptionInfo *exception)
{
typedef enum
{
UndefinedPrecedence,
NullPrecedence,
BitwiseComplementPrecedence,
ExponentPrecedence,
ExponentialNotationPrecedence,
MultiplyPrecedence,
AdditionPrecedence,
ShiftPrecedence,
RelationalPrecedence,
EquivalencyPrecedence,
BitwiseAndPrecedence,
BitwiseOrPrecedence,
LogicalAndPrecedence,
LogicalOrPrecedence,
TernaryPrecedence,
AssignmentPrecedence,
CommaPrecedence,
SeparatorPrecedence
} FxPrecedence;
FxPrecedence
precedence,
target;
const char
*subexpression;
int
c;
size_t
level;
c=(-1);
level=0;
subexpression=(const char *) NULL;
target=NullPrecedence;
while ((c != '\0') && (*expression != '\0'))
{
precedence=UndefinedPrecedence;
if ((isspace((int) ((unsigned char) *expression)) != 0) || (c == (int) '@'))
{
expression++;
continue;
}
switch (*expression)
{
case 'A':
case 'a':
{
#if defined(MAGICKCORE_HAVE_ACOSH)
if (IsFxFunction(expression,"acosh",5) != MagickFalse)
{
expression+=5;
break;
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (IsFxFunction(expression,"asinh",5) != MagickFalse)
{
expression+=5;
break;
}
#endif
#if defined(MAGICKCORE_HAVE_ATANH)
if (IsFxFunction(expression,"atanh",5) != MagickFalse)
{
expression+=5;
break;
}
#endif
if (IsFxFunction(expression,"atan2",5) != MagickFalse)
{
expression+=5;
break;
}
break;
}
case 'E':
case 'e':
{
if ((isdigit((int) ((unsigned char) c)) != 0) &&
((LocaleNCompare(expression,"E+",2) == 0) ||
(LocaleNCompare(expression,"E-",2) == 0)))
{
expression+=2; /* scientific notation */
break;
}
}
case 'J':
case 'j':
{
if ((IsFxFunction(expression,"j0",2) != MagickFalse) ||
(IsFxFunction(expression,"j1",2) != MagickFalse))
{
expression+=2;
break;
}
break;
}
case '#':
{
while (isxdigit((int) ((unsigned char) *(expression+1))) != 0)
expression++;
break;
}
default:
break;
}
if ((c == (int) '{') || (c == (int) '['))
level++;
else
if ((c == (int) '}') || (c == (int) ']'))
level--;
if (level == 0)
switch ((unsigned char) *expression)
{
case '~':
case '!':
{
precedence=BitwiseComplementPrecedence;
break;
}
case '^':
case '@':
{
precedence=ExponentPrecedence;
break;
}
default:
{
if (((c != 0) && ((isdigit((int) ((unsigned char) c)) != 0) ||
(strchr(")",c) != (char *) NULL))) &&
(((islower((int) ((unsigned char) *expression)) != 0) ||
(strchr("(",(int) ((unsigned char) *expression)) != (char *) NULL)) ||
((isdigit((int) ((unsigned char) c)) == 0) &&
(isdigit((int) ((unsigned char) *expression)) != 0))) &&
(strchr("xy",(int) ((unsigned char) *expression)) == (char *) NULL))
precedence=MultiplyPrecedence;
break;
}
case '*':
case '/':
case '%':
{
precedence=MultiplyPrecedence;
break;
}
case '+':
case '-':
{
if ((strchr("(+-/*%:&^|<>~,",c) == (char *) NULL) ||
(isalpha((int) ((unsigned char) c)) != 0))
precedence=AdditionPrecedence;
break;
}
case BitwiseAndAssignmentOperator:
case BitwiseOrAssignmentOperator:
case LeftShiftAssignmentOperator:
case RightShiftAssignmentOperator:
case PowerAssignmentOperator:
case ModuloAssignmentOperator:
case PlusAssignmentOperator:
case SubtractAssignmentOperator:
case MultiplyAssignmentOperator:
case DivideAssignmentOperator:
case IncrementAssignmentOperator:
case DecrementAssignmentOperator:
{
precedence=AssignmentPrecedence;
break;
}
case LeftShiftOperator:
case RightShiftOperator:
{
precedence=ShiftPrecedence;
break;
}
case '<':
case LessThanEqualOperator:
case GreaterThanEqualOperator:
case '>':
{
precedence=RelationalPrecedence;
break;
}
case EqualOperator:
case NotEqualOperator:
{
precedence=EquivalencyPrecedence;
break;
}
case '&':
{
precedence=BitwiseAndPrecedence;
break;
}
case '|':
{
precedence=BitwiseOrPrecedence;
break;
}
case LogicalAndOperator:
{
precedence=LogicalAndPrecedence;
break;
}
case LogicalOrOperator:
{
precedence=LogicalOrPrecedence;
break;
}
case ExponentialNotation:
{
precedence=ExponentialNotationPrecedence;
break;
}
case ':':
case '?':
{
precedence=TernaryPrecedence;
break;
}
case '=':
{
precedence=AssignmentPrecedence;
break;
}
case ',':
{
precedence=CommaPrecedence;
break;
}
case ';':
{
precedence=SeparatorPrecedence;
break;
}
}
if ((precedence == BitwiseComplementPrecedence) ||
(precedence == TernaryPrecedence) ||
(precedence == AssignmentPrecedence))
{
if (precedence > target)
{
/*
Right-to-left associativity.
*/
target=precedence;
subexpression=expression;
}
}
else
if (precedence >= target)
{
/*
Left-to-right associativity.
*/
target=precedence;
subexpression=expression;
}
if (strchr("(",(int) *expression) != (char *) NULL)
expression=FxSubexpression(expression,exception);
c=(int) (*expression++);
}
return(subexpression);
}
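/*
  Worked example (illustration only): for "1+2*3" the scan above settles on
  '+', because AdditionPrecedence follows MultiplyPrecedence in the
  FxPrecedence enumeration and left-to-right associativity keeps the loosest
  operator seen so far.  FxEvaluateSubexpression() therefore splits the
  expression into "1" and "2*3" and recurses on each side.
*/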
static double FxEvaluateSubexpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
const char *expression,const size_t depth,double *beta,
ExceptionInfo *exception)
{
#define FxMaxParenthesisDepth 58
#define FxMaxSubexpressionDepth 200
#define FxReturn(value) \
{ \
subexpression=DestroyString(subexpression); \
return(value); \
}
#define FxParseConditional(subexpression,sentinel,p,q) \
{ \
  p=subexpression; \
  for (q=(char *) p; (*q != (sentinel)) && (*q != '\0'); q++) \
if (*q == '(') \
{ \
for (q++; (*q != ')') && (*q != '\0'); q++); \
if (*q == '\0') \
break; \
} \
if (*q == '\0') \
{ \
(void) ThrowMagickException(exception,GetMagickModule(), \
OptionError,"UnableToParseExpression","`%s'",subexpression); \
FxReturn(0.0); \
} \
if (strlen(q) == 1) \
*(q+1)='\0'; \
*q='\0'; \
}
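  /*
    Illustration only: given subexpression "x<0.5,1,0" and sentinel ',',
    FxParseConditional() leaves p pointing at "x<0.5" and q at the former
    comma (now '\0'), so q+1 addresses "1,0" for the next split.  Commas
    inside parentheses, as in "atan2(y,x),...", are skipped by the inner
    loop.
  */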
char
*q,
*subexpression;
double
alpha,
gamma,
sans,
value;
const char
*p;
*beta=0.0;
sans=0.0;
subexpression=AcquireString(expression);
*subexpression='\0';
if (depth > FxMaxSubexpressionDepth)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"UnableToParseExpression","`%s'",expression);
FxReturn(0.0);
}
if (exception->severity >= ErrorException)
FxReturn(0.0);
while (isspace((int) ((unsigned char) *expression)) != 0)
expression++;
if (*expression == '\0')
FxReturn(0.0);
p=FxOperatorPrecedence(expression,exception);
if (p != (const char *) NULL)
{
(void) CopyMagickString(subexpression,expression,(size_t)
(p-expression+1));
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
switch ((unsigned char) *p)
{
case '~':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) (~(size_t) *beta);
FxReturn(*beta);
}
case '!':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta == 0.0 ? 1.0 : 0.0);
}
case '^':
{
*beta=pow(alpha,FxEvaluateSubexpression(fx_info,channel,x,y,++p,
depth+1,beta,exception));
FxReturn(*beta);
}
case '*':
case ExponentialNotation:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha*(*beta));
}
case '/':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(PerceptibleReciprocal(*beta)*alpha);
}
case '%':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fmod(alpha,*beta));
}
case '+':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha+(*beta));
}
case '-':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha-(*beta));
}
case BitwiseAndAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) & (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
          FxReturn(0.0);
FxReturn(*beta);
}
case BitwiseOrAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(double) ((size_t) (alpha+0.5) | (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
          FxReturn(0.0);
FxReturn(*beta);
}
case LeftShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) << (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
          FxReturn(0.0);
FxReturn(*beta);
}
case RightShiftAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (*beta+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
value=(double) ((size_t) (alpha+0.5) >> (size_t) (*beta+0.5));
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
          FxReturn(0.0);
FxReturn(*beta);
}
case PowerAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=pow(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
          FxReturn(0.0);
FxReturn(*beta);
}
case ModuloAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=fmod(alpha,*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
          FxReturn(0.0);
FxReturn(*beta);
}
case PlusAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
          FxReturn(0.0);
FxReturn(*beta);
}
case SubtractAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
          FxReturn(0.0);
FxReturn(*beta);
}
case MultiplyAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
          FxReturn(0.0);
FxReturn(*beta);
}
case DivideAssignmentOperator:
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha*PerceptibleReciprocal(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
          FxReturn(0.0);
FxReturn(*beta);
}
case IncrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha+1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
            FxReturn(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
            FxReturn(0.0);
FxReturn(*beta);
}
case DecrementAssignmentOperator:
{
if (*subexpression == '\0')
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=alpha-1.0;
if (*subexpression == '\0')
{
if (SetFxSymbolValue(fx_info,p,value) == MagickFalse)
            FxReturn(0.0);
}
else
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
            FxReturn(0.0);
FxReturn(*beta);
}
case LeftShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) << (size_t) (gamma+0.5));
FxReturn(*beta);
}
case RightShiftOperator:
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
if ((size_t) (gamma+0.5) >= (8*sizeof(size_t)))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ShiftCountOverflow","`%s'",subexpression);
FxReturn(0.0);
}
*beta=(double) ((size_t) (alpha+0.5) >> (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '<':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha < *beta ? 1.0 : 0.0);
}
case LessThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha <= *beta ? 1.0 : 0.0);
}
case '>':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha > *beta ? 1.0 : 0.0);
}
case GreaterThanEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha >= *beta ? 1.0 : 0.0);
}
case EqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) < MagickEpsilon ? 1.0 : 0.0);
}
case NotEqualOperator:
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(fabs(alpha-(*beta)) >= MagickEpsilon ? 1.0 : 0.0);
}
case '&':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) & (size_t) (gamma+0.5));
FxReturn(*beta);
}
case '|':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
*beta=(double) ((size_t) (alpha+0.5) | (size_t) (gamma+0.5));
FxReturn(*beta);
}
case LogicalAndOperator:
{
p++;
if (alpha <= 0.0)
{
*beta=0.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case LogicalOrOperator:
{
p++;
if (alpha > 0.0)
{
*beta=1.0;
FxReturn(*beta);
}
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
*beta=(gamma > 0.0) ? 1.0 : 0.0;
FxReturn(*beta);
}
case '?':
{
(void) CopyMagickString(subexpression,++p,MagickPathExtent-1);
FxParseConditional(subexpression,':',p,q);
if (fabs(alpha) >= MagickEpsilon)
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(gamma);
}
case '=':
{
q=subexpression;
while (isalpha((int) ((unsigned char) *q)) != 0)
q++;
if (*q != '\0')
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnableToParseExpression","`%s'",subexpression);
FxReturn(0.0);
}
ClearMagickException(exception);
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
value=(*beta);
if (SetFxSymbolValue(fx_info,subexpression,value) == MagickFalse)
          FxReturn(0.0);
FxReturn(*beta);
}
case ',':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(alpha);
}
case ';':
{
*beta=FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,beta,
exception);
FxReturn(*beta);
}
default:
{
gamma=alpha*FxEvaluateSubexpression(fx_info,channel,x,y,++p,depth+1,
beta,exception);
FxReturn(gamma);
}
}
}
if (strchr("(",(int) *expression) != (char *) NULL)
{
size_t
length;
if (depth >= FxMaxParenthesisDepth)
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"ParenthesisNestedTooDeeply","`%s'",expression);
length=CopyMagickString(subexpression,expression+1,MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,subexpression,depth+1,
beta,exception);
FxReturn(gamma);
}
switch (*expression)
{
case '+':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(1.0*gamma);
}
case '-':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn(-1.0*gamma);
}
case '~':
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,expression+1,depth+1,
beta,exception);
FxReturn((double) (~(size_t) (gamma+0.5)));
}
case 'A':
case 'a':
{
if (IsFxFunction(expression,"abs",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(fabs(alpha));
}
#if defined(MAGICKCORE_HAVE_ACOSH)
if (IsFxFunction(expression,"acosh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(acosh(alpha));
}
#endif
if (IsFxFunction(expression,"acos",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(acos(alpha));
}
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"airy",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
gamma=2.0*j1((MagickPI*alpha))/(MagickPI*alpha);
FxReturn(gamma*gamma);
}
#endif
#if defined(MAGICKCORE_HAVE_ASINH)
if (IsFxFunction(expression,"asinh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(asinh(alpha));
}
#endif
if (IsFxFunction(expression,"asin",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(asin(alpha));
}
if (IsFxFunction(expression,"alt",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(((ssize_t) alpha) & 0x01 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"atan2",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atan2(alpha,*beta));
}
#if defined(MAGICKCORE_HAVE_ATANH)
if (IsFxFunction(expression,"atanh",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(atanh(alpha));
}
#endif
if (IsFxFunction(expression,"atan",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(atan(alpha));
}
if (LocaleCompare(expression,"a") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'B':
case 'b':
{
if (LocaleCompare(expression,"b") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'C':
case 'c':
{
if (IsFxFunction(expression,"ceil",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(ceil(alpha));
}
if (IsFxFunction(expression,"clamp",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha < 0.0)
FxReturn(0.0);
if (alpha > 1.0)
FxReturn(1.0);
FxReturn(alpha);
}
if (IsFxFunction(expression,"cosh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(cosh(alpha));
}
if (IsFxFunction(expression,"cos",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(cos(alpha));
}
if (LocaleCompare(expression,"c") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'D':
case 'd':
{
if (IsFxFunction(expression,"debug",5) != MagickFalse)
{
const char
*type;
size_t
length;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
switch (fx_info->images->colorspace)
{
case CMYKColorspace:
{
switch (channel)
{
case CyanPixelChannel: type="cyan"; break;
case MagentaPixelChannel: type="magenta"; break;
case YellowPixelChannel: type="yellow"; break;
case AlphaPixelChannel: type="alpha"; break;
case BlackPixelChannel: type="black"; break;
default: type="unknown"; break;
}
break;
}
case GRAYColorspace:
{
switch (channel)
{
case RedPixelChannel: type="gray"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
default:
{
switch (channel)
{
case RedPixelChannel: type="red"; break;
case GreenPixelChannel: type="green"; break;
case BluePixelChannel: type="blue"; break;
case AlphaPixelChannel: type="alpha"; break;
default: type="unknown"; break;
}
break;
}
}
*subexpression='\0';
length=1;
if (strlen(expression) > 6)
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent);
if (length != 0)
subexpression[length-1]='\0';
if (fx_info->file != (FILE *) NULL)
(void) FormatLocaleFile(fx_info->file,"%s[%.20g,%.20g].%s: "
"%s=%.*g\n",fx_info->images->filename,(double) x,(double) y,type,
subexpression,GetMagickPrecision(),alpha);
FxReturn(alpha);
}
if (IsFxFunction(expression,"do",2) != MagickFalse)
{
size_t
length;
          /*
            Parse do(condition test,expression); the expression is evaluated
            before the condition is tested.
          */
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
}
FxReturn(alpha);
}
if (IsFxFunction(expression,"drc",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((alpha/(*beta*(alpha-1.0)+1.0)));
}
break;
}
case 'E':
case 'e':
{
if (LocaleCompare(expression,"epsilon") == 0)
FxReturn(MagickEpsilon);
#if defined(MAGICKCORE_HAVE_ERF)
if (IsFxFunction(expression,"erf",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(erf(alpha));
}
#endif
if (IsFxFunction(expression,"exp",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(exp(alpha));
}
if (LocaleCompare(expression,"e") == 0)
FxReturn(2.7182818284590452354);
break;
}
case 'F':
case 'f':
{
if (IsFxFunction(expression,"floor",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"for",3) != MagickFalse)
{
size_t
length;
/*
Parse for(initialization, condition test, expression).
*/
length=CopyMagickString(subexpression,expression+4,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
}
FxReturn(alpha);
}
break;
}
case 'G':
case 'g':
{
if (IsFxFunction(expression,"gauss",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(exp((-alpha*alpha/2.0))/sqrt(2.0*MagickPI));
}
if (IsFxFunction(expression,"gcd",3) != MagickFalse)
{
double
gcd;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
if (IsNaN(alpha) != 0)
FxReturn(alpha);
gcd=FxGCD(alpha,*beta,0);
FxReturn(gcd);
}
if (LocaleCompare(expression,"g") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'H':
case 'h':
{
if (LocaleCompare(expression,"h") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (LocaleCompare(expression,"hue") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"hypot",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn(hypot(alpha,*beta));
}
break;
}
case 'K':
case 'k':
{
if (LocaleCompare(expression,"k") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'I':
case 'i':
{
if (IsFxFunction(expression,"if",2) != MagickFalse)
{
size_t
length;
/*
Parse if(condition test, true-expression, false-expression).
*/
length=CopyMagickString(subexpression,expression+3,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
(void) CopyMagickString(subexpression,q+1,MagickPathExtent-1);
FxParseConditional(subexpression,',',p,q);
if (fabs(alpha) >= MagickEpsilon)
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,beta,
exception);
else
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,beta,
exception);
FxReturn(alpha);
}
if (LocaleCompare(expression,"intensity") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"int",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(floor(alpha));
}
if (IsFxFunction(expression,"isnan",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
FxReturn((double) !!IsNaN(alpha));
}
if (LocaleCompare(expression,"i") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'J':
case 'j':
{
if (LocaleCompare(expression,"j") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
#if defined(MAGICKCORE_HAVE_J0)
if (IsFxFunction(expression,"j0",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j0(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"j1",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(j1(alpha));
}
#endif
#if defined(MAGICKCORE_HAVE_J1)
if (IsFxFunction(expression,"jinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0.0)
FxReturn(1.0);
FxReturn((2.0*j1((MagickPI*alpha))/(MagickPI*alpha)));
}
#endif
break;
}
case 'L':
case 'l':
{
if (IsFxFunction(expression,"ln",2) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+2,
depth+1,beta,exception);
FxReturn(log(alpha));
}
if (IsFxFunction(expression,"logtwo",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn(log10(alpha)/log10(2.0));
}
if (IsFxFunction(expression,"log",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(log10(alpha));
}
if (LocaleCompare(expression,"lightness") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'M':
case 'm':
{
if (LocaleCompare(expression,"MaxRGB") == 0)
FxReturn(QuantumRange);
if (LocaleNCompare(expression,"maxima",6) == 0)
break;
if (IsFxFunction(expression,"max",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha > *beta ? alpha : *beta);
}
if (LocaleNCompare(expression,"minima",6) == 0)
break;
if (IsFxFunction(expression,"min",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha < *beta ? alpha : *beta);
}
if (IsFxFunction(expression,"mod",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(alpha-floor((alpha*PerceptibleReciprocal(*beta)))*(*beta));
}
if (LocaleCompare(expression,"m") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'N':
case 'n':
{
if (IsFxFunction(expression,"not",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn((double) (alpha < MagickEpsilon));
}
if (LocaleCompare(expression,"n") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'O':
case 'o':
{
if (LocaleCompare(expression,"Opaque") == 0)
FxReturn(1.0);
if (LocaleCompare(expression,"o") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'P':
case 'p':
{
if (LocaleCompare(expression,"phi") == 0)
FxReturn(MagickPHI);
if (LocaleCompare(expression,"pi") == 0)
FxReturn(MagickPI);
if (IsFxFunction(expression,"pow",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(pow(alpha,*beta));
}
if (LocaleCompare(expression,"p") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Q':
case 'q':
{
if (LocaleCompare(expression,"QuantumRange") == 0)
FxReturn(QuantumRange);
if (LocaleCompare(expression,"QuantumScale") == 0)
FxReturn(QuantumScale);
break;
}
case 'R':
case 'r':
{
if (IsFxFunction(expression,"rand",4) != MagickFalse)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FxEvaluateSubexpression)
#endif
alpha=GetPseudoRandomValue(fx_info->random_info);
FxReturn(alpha);
}
if (IsFxFunction(expression,"round",5) != MagickFalse)
{
          /*
            Round alpha to the nearest integer.
          */
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if ((alpha-floor(alpha)) < (ceil(alpha)-alpha))
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"r") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'S':
case 's':
{
if (LocaleCompare(expression,"saturation") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
if (IsFxFunction(expression,"sign",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(alpha < 0.0 ? -1.0 : 1.0);
}
if (IsFxFunction(expression,"sinc",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
if (alpha == 0)
FxReturn(1.0);
FxReturn(sin((MagickPI*alpha))/(MagickPI*alpha));
}
if (IsFxFunction(expression,"sinh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sinh(alpha));
}
if (IsFxFunction(expression,"sin",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(sin(alpha));
}
if (IsFxFunction(expression,"sqrt",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(sqrt(alpha));
}
if (IsFxFunction(expression,"squish",6) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+6,
depth+1,beta,exception);
FxReturn((1.0/(1.0+exp(-alpha))));
}
if (LocaleCompare(expression,"s") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'T':
case 't':
{
if (IsFxFunction(expression,"tanh",4) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+4,
depth+1,beta,exception);
FxReturn(tanh(alpha));
}
if (IsFxFunction(expression,"tan",3) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+3,
depth+1,beta,exception);
FxReturn(tan(alpha));
}
if (LocaleCompare(expression,"Transparent") == 0)
FxReturn(0.0);
if (IsFxFunction(expression,"trunc",5) != MagickFalse)
{
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,expression+5,
depth+1,beta,exception);
if (alpha >= 0.0)
FxReturn(floor(alpha));
FxReturn(ceil(alpha));
}
if (LocaleCompare(expression,"t") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'U':
case 'u':
{
if (LocaleCompare(expression,"u") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'V':
case 'v':
{
if (LocaleCompare(expression,"v") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'W':
case 'w':
{
if (IsFxFunction(expression,"while",5) != MagickFalse)
{
size_t
length;
/*
Parse while(condition test, expression).
*/
length=CopyMagickString(subexpression,expression+6,
MagickPathExtent-1);
if (length != 0)
subexpression[length-1]='\0';
FxParseConditional(subexpression,',',p,q);
for (alpha=0.0; ; )
{
gamma=FxEvaluateSubexpression(fx_info,channel,x,y,p,depth+1,&sans,
exception);
if (fabs(gamma) < MagickEpsilon)
break;
alpha=FxEvaluateSubexpression(fx_info,channel,x,y,q+1,depth+1,
beta,exception);
}
FxReturn(alpha);
}
if (LocaleCompare(expression,"w") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Y':
case 'y':
{
if (LocaleCompare(expression,"y") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
case 'Z':
case 'z':
{
if (LocaleCompare(expression,"z") == 0)
FxReturn(FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception));
break;
}
default:
break;
}
subexpression=DestroyString(subexpression);
q=(char *) expression;
alpha=InterpretSiPrefixValue(expression,&q);
if (q == expression)
alpha=FxGetSymbol(fx_info,channel,x,y,expression,depth+1,exception);
FxReturn(alpha);
}
MagickPrivate MagickBooleanType FxEvaluateExpression(FxInfo *fx_info,
double *alpha,ExceptionInfo *exception)
{
MagickBooleanType
status;
status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
exception);
return(status);
}
MagickExport MagickBooleanType FxPreprocessExpression(FxInfo *fx_info,
double *alpha,ExceptionInfo *exception)
{
FILE
*file;
MagickBooleanType
status;
file=fx_info->file;
fx_info->file=(FILE *) NULL;
status=FxEvaluateChannelExpression(fx_info,GrayPixelChannel,0,0,alpha,
exception);
fx_info->file=file;
return(status);
}
MagickPrivate MagickBooleanType FxEvaluateChannelExpression(FxInfo *fx_info,
const PixelChannel channel,const ssize_t x,const ssize_t y,
double *alpha,ExceptionInfo *exception)
{
double
beta;
beta=0.0;
*alpha=FxEvaluateSubexpression(fx_info,channel,x,y,fx_info->expression,0,
&beta,exception);
return(exception->severity == OptionError ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F x I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FxImage() applies a mathematical expression to the specified image.
%
% The format of the FxImage method is:
%
% Image *FxImage(const Image *image,const char *expression,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o expression: A mathematical expression.
%
% o exception: return any errors or warnings in this structure.
%
*/
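/*
  Usage sketch (illustration only, not part of MagickCore): average the
  first two frames of a sequence with FxImage().  AverageTwoFrames and the
  reader path are placeholders; error handling is minimal.
*/
#if 0
static Image *AverageTwoFrames(const char *path,ExceptionInfo *exception)
{
  Image
    *average,
    *images;

  ImageInfo
    *image_info;

  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,path,MagickPathExtent);
  images=ReadImage(image_info,exception);
  image_info=DestroyImageInfo(image_info);
  if (images == (Image *) NULL)
    return((Image *) NULL);
  average=FxImage(images,"(u[0]+u[1])/2",exception);
  images=DestroyImageList(images);
  return(average);
}
#endif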
static FxInfo **DestroyFxThreadSet(FxInfo **fx_info)
{
ssize_t
i;
assert(fx_info != (FxInfo **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (fx_info[i] != (FxInfo *) NULL)
fx_info[i]=DestroyFxInfo(fx_info[i]);
fx_info=(FxInfo **) RelinquishMagickMemory(fx_info);
return(fx_info);
}
static FxInfo **AcquireFxThreadSet(const Image *image,const char *expression,
ExceptionInfo *exception)
{
char
*fx_expression;
double
alpha;
FxInfo
**fx_info;
ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
fx_info=(FxInfo **) AcquireQuantumMemory(number_threads,sizeof(*fx_info));
if (fx_info == (FxInfo **) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return((FxInfo **) NULL);
}
(void) memset(fx_info,0,number_threads*sizeof(*fx_info));
if (*expression != '@')
fx_expression=ConstantString(expression);
else
fx_expression=FileToString(expression+1,~0UL,exception);
for (i=0; i < (ssize_t) number_threads; i++)
{
MagickBooleanType
status;
fx_info[i]=AcquireFxInfo(image,fx_expression,exception);
if (fx_info[i] == (FxInfo *) NULL)
break;
status=FxPreprocessExpression(fx_info[i],&alpha,exception);
if (status == MagickFalse)
break;
}
fx_expression=DestroyString(fx_expression);
if (i < (ssize_t) number_threads)
fx_info=DestroyFxThreadSet(fx_info);
return(fx_info);
}
MagickExport Image *FxImage(const Image *image,const char *expression,
ExceptionInfo *exception)
{
#define FxImageTag "Fx/Image"
CacheView
*fx_view,
*image_view;
FxInfo
**magick_restrict fx_info;
Image
*fx_image;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (expression == (const char *) NULL)
return(CloneImage(image,0,0,MagickTrue,exception));
fx_info=AcquireFxThreadSet(image,expression,exception);
if (fx_info == (FxInfo **) NULL)
return((Image *) NULL);
fx_image=CloneImage(image,0,0,MagickTrue,exception);
if (fx_image == (Image *) NULL)
{
fx_info=DestroyFxThreadSet(fx_info);
return((Image *) NULL);
}
if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse)
{
fx_info=DestroyFxThreadSet(fx_info);
fx_image=DestroyImage(fx_image);
return((Image *) NULL);
}
/*
Fx image.
*/
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
fx_view=AcquireAuthenticCacheView(fx_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic) shared(progress,status) \
magick_number_threads(image,fx_image,fx_image->rows, \
GlobExpression(fx_info[0]->expression,"debug(",MagickTrue) == 0 ? 1 : 0)
#endif
for (y=0; y < (ssize_t) fx_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
Quantum
*magick_restrict q;
ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=QueueCacheViewAuthenticPixels(fx_view,0,y,fx_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) fx_image->columns; x++)
{
ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
alpha;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait fx_traits=GetPixelChannelTraits(fx_image,channel);
if ((traits == UndefinedPixelTrait) ||
(fx_traits == UndefinedPixelTrait))
continue;
if ((fx_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(fx_image,channel,p[i],q);
continue;
}
alpha=0.0;
(void) FxEvaluateChannelExpression(fx_info[id],channel,x,y,&alpha,
exception);
q[i]=ClampToQuantum(QuantumRange*alpha);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(fx_image);
}
if (SyncCacheViewAuthenticPixels(fx_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,FxImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
fx_view=DestroyCacheView(fx_view);
image_view=DestroyCacheView(image_view);
fx_info=DestroyFxThreadSet(fx_info);
if (status == MagickFalse)
fx_image=DestroyImage(fx_image);
return(fx_image);
}
|
transform.h | /*!
* Copyright 2018 XGBoost contributors
*/
#ifndef XGBOOST_COMMON_TRANSFORM_H_
#define XGBOOST_COMMON_TRANSFORM_H_
#include <dmlc/omp.h>
#include <dmlc/common.h>
#include <xgboost/data.h>
#include <utility>
#include <vector>
#include <type_traits> // enable_if
#include "xgboost/host_device_vector.h"
#include "xgboost/span.h"
#include "common.h"
#if defined (__CUDACC__)
#include "device_helpers.cuh"
#endif // defined (__CUDACC__)
namespace xgboost {
namespace common {
constexpr size_t kBlockThreads = 256;
namespace detail {
#if defined(__CUDACC__)
template <typename Functor, typename... SpanType>
__global__ void LaunchCUDAKernel(Functor _func, Range _range,
SpanType... _spans) {
for (auto i : dh::GridStrideRange(*_range.begin(), *_range.end())) {
_func(i, _spans...);
}
}
#endif // defined(__CUDACC__)
} // namespace detail
/*! \brief Do Transformation on HostDeviceVectors.
*
 * \tparam CompiledWithCuda A bool parameter used to distinguish compilation
 * trajectories; users do not need to set it explicitly.
*
 * Note: Using Transform is a VERY tricky thing to do. Transform uses a
 * template argument to duplicate itself into two different types, one for
 * the CPU, another for CUDA. The trick is not without its flaw:
 *
 * If you use it in a function that can be compiled by both nvcc and the
 * host compiler, the behaviour is undefined, because your function is NOT
 * duplicated by `CompiledWithCuda`; at link time, the CUDA compiler will
 * merge functions with the same signature.
*/
template <bool CompiledWithCuda = WITH_CUDA()>
class Transform {
private:
template <typename Functor>
struct Evaluator {
public:
Evaluator(Functor func, Range range, int device, bool shard) :
func_(func), range_{std::move(range)},
shard_{shard},
device_{device} {}
/*!
* \brief Evaluate the functor with input pointers to HostDeviceVector.
*
* \tparam HDV... HostDeviceVectors type.
* \param vectors Pointers to HostDeviceVector.
*/
template <typename... HDV>
void Eval(HDV... vectors) const {
bool on_device = device_ >= 0;
if (on_device) {
LaunchCUDA(func_, vectors...);
} else {
LaunchCPU(func_, vectors...);
}
}
private:
// CUDA UnpackHDV
template <typename T>
Span<T> UnpackHDVOnDevice(HostDeviceVector<T>* _vec) const {
auto span = _vec->DeviceSpan();
return span;
}
template <typename T>
Span<T const> UnpackHDVOnDevice(const HostDeviceVector<T>* _vec) const {
auto span = _vec->ConstDeviceSpan();
return span;
}
// CPU UnpackHDV
template <typename T>
Span<T> UnpackHDV(HostDeviceVector<T>* _vec) const {
return Span<T> {_vec->HostPointer(),
static_cast<typename Span<T>::index_type>(_vec->Size())};
}
template <typename T>
Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec) const {
return Span<T const> {_vec->ConstHostPointer(),
static_cast<typename Span<T>::index_type>(_vec->Size())};
}
// Recursive sync host
template <typename T>
void SyncHost(const HostDeviceVector<T> *_vector) const {
_vector->ConstHostPointer();
}
template <typename Head, typename... Rest>
void SyncHost(const HostDeviceVector<Head> *_vector,
const HostDeviceVector<Rest> *... _vectors) const {
_vector->ConstHostPointer();
SyncHost(_vectors...);
}
// Recursive unpack for Shard.
template <typename T>
void UnpackShard(int device, const HostDeviceVector<T> *vector) const {
vector->SetDevice(device);
}
template <typename Head, typename... Rest>
void UnpackShard(int device,
const HostDeviceVector<Head> *_vector,
const HostDeviceVector<Rest> *... _vectors) const {
_vector->SetDevice(device);
UnpackShard(device, _vectors...);
}
#if defined(__CUDACC__)
template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
typename... HDV>
void LaunchCUDA(Functor _func, HDV*... _vectors) const {
if (shard_)
UnpackShard(device_, _vectors...);
size_t range_size = *range_.end() - *range_.begin();
// Extract the loop extent to cope with potentially old OpenMP versions.
// This handles situations like the multi-class setting, where a
// granularity factor is applied to the data vector.
size_t shard_size = range_size;
Range shard_range {0, static_cast<Range::DifferenceType>(shard_size)};
dh::safe_cuda(cudaSetDevice(device_));
const int kGrids =
static_cast<int>(DivRoundUp(*(range_.end()), kBlockThreads));
if (kGrids == 0) {
return;
}
detail::LaunchCUDAKernel<<<kGrids, kBlockThreads>>>( // NOLINT
_func, shard_range, UnpackHDVOnDevice(_vectors)...);
}
#else
/*! \brief Dummy function defined when compiling for CPU. */
template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
typename... HDV>
void LaunchCUDA(Functor _func, HDV*... _vectors) const {
LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA();
}
#endif // defined(__CUDACC__)
template <typename... HDV>
void LaunchCPU(Functor func, HDV*... vectors) const {
omp_ulong end = static_cast<omp_ulong>(*(range_.end()));
dmlc::OMPException omp_exc;
SyncHost(vectors...);
#pragma omp parallel for schedule(static)
for (omp_ulong idx = 0; idx < end; ++idx) {
omp_exc.Run(func, idx, UnpackHDV(vectors)...);
}
omp_exc.Rethrow();
}
private:
/*! \brief Callable object. */
Functor func_;
/*! \brief Range object specifying the index range for parallel threads. */
Range range_;
/*! \brief Whether sharding for vectors is required. */
bool shard_;
int device_;
};
public:
/*!
* \brief Initialize a Transform object.
*
* \tparam Functor A callable object type.
* \return An Evaluator having one method, Eval.
*
* \param func A callable object, accepting a size_t thread index,
* followed by a set of Span classes.
* \param range Range object specifying the index range for parallel threads.
* \param device Specifies which GPU to use.
* \param shard Whether sharding of the HostDeviceVectors is needed.
*/
template <typename Functor>
static Evaluator<Functor> Init(Functor func, Range const range,
int device,
bool const shard = true) {
return Evaluator<Functor> {func, std::move(range), device, shard};
}
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_TRANSFORM_H_
|
openmp.c | /*
* Copyright (c) 2003, 2007-11 Matteo Frigo
* Copyright (c) 2003, 2007-11 Massachusetts Institute of Technology
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/* openmp.c: thread spawning via OpenMP */
#include "threads.h"
#if !defined(_OPENMP)
#error OpenMP enabled but not using an OpenMP compiler
#endif
int X(ithreads_init)(void)
{
return 0; /* no error */
}
/* Distribute a loop from 0 to loopmax-1 over nthreads threads.
proc(d) is called to execute a block of iterations from d->min
to d->max-1. d->thr_num indicates the number of the thread
that is executing proc (from 0 to nthreads-1), and d->data is
the same as the data parameter passed to X(spawn_loop).
This function returns only after all the threads have completed. */
void X(spawn_loop)(int loopmax, int nthr, spawn_function proc, void *data)
{
int block_size;
spawn_data d;
int i;
A(loopmax >= 0);
A(nthr > 0);
A(proc);
if (!loopmax) return;
/* Choose the block size and number of threads in order to (1)
minimize the critical path and (2) use the fewest threads that
achieve the same critical path (to minimize overhead).
e.g. if loopmax is 5 and nthr is 4, we should use only 3
threads with block sizes of 2, 2, and 1. */
block_size = (loopmax + nthr - 1) / nthr;
nthr = (loopmax + block_size - 1) / block_size;
THREAD_ON; /* prevent debugging mode from failing under threads */
#pragma omp parallel for private(d)
for (i = 0; i < nthr; ++i) {
d.max = (d.min = i * block_size) + block_size;
if (d.max > loopmax)
d.max = loopmax;
d.thr_num = i;
d.data = data;
proc(&d);
}
THREAD_OFF; /* prevent debugging mode from failing under threads */
}
void X(threads_cleanup)(void)
{
}
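/* Hedged aside (illustration only, not part of the library): the arithmetic
   used in X(spawn_loop) above to choose the block size and the effective
   thread count, extracted into a standalone helper. The name
   example_block_partition is hypothetical. */
static void example_block_partition(int loopmax, int nthr,
                                    int *block_size, int *nthr_used)
{
     /* smallest block that keeps ceil(loopmax/nthr) iterations per thread */
     *block_size = (loopmax + nthr - 1) / nthr;
     /* fewest threads that still cover all loopmax iterations */
     *nthr_used = (loopmax + *block_size - 1) / *block_size;
     /* e.g. loopmax=5, nthr=4 gives block_size=2 and nthr_used=3,
        i.e. blocks of 2, 2, and 1 as described in the comment above. */
}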
|
wip_7b843.c | #define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"
#include <stdio.h>
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
struct dataobj
{
void *restrict data;
int *size;
int *npsize;
int *dsize;
int *hsize;
int *hofs;
int *oofs;
};
struct profiler
{
double section0;
double section1;
double section2;
};
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, float **restrict r47_vec, float **restrict r48_vec, const int time, const int tw, const int sp_zi_m);
int ForwardTTI(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, struct dataobj *restrict delta_vec, const float dt, struct dataobj *restrict epsilon_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict phi_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict theta_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, const int x_size, const int y_size, const int z_size, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine)
{
int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data;
float(*restrict delta)[delta_vec->size[1]][delta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[delta_vec->size[1]][delta_vec->size[2]])delta_vec->data;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict phi)[phi_vec->size[1]][phi_vec->size[2]] __attribute__((aligned(64))) = (float(*)[phi_vec->size[1]][phi_vec->size[2]])phi_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
float(*restrict theta)[theta_vec->size[1]][theta_vec->size[2]] __attribute__((aligned(64))) = (float(*)[theta_vec->size[1]][theta_vec->size[2]])theta_vec->data;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*r17)[y_size + 1][z_size + 1];
posix_memalign((void **)&r17, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r18)[y_size + 1][z_size + 1];
posix_memalign((void **)&r18, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r19)[y_size + 1][z_size + 1];
posix_memalign((void **)&r19, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r20)[y_size + 1][z_size + 1];
posix_memalign((void **)&r20, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float(*r21)[y_size + 1][z_size + 1];
posix_memalign((void **)&r21, 64, sizeof(float[x_size + 1][y_size + 1][z_size + 1]));
float **r47;
posix_memalign((void **)&r47, 64, sizeof(float *) * nthreads);
float **r48;
posix_memalign((void **)&r48, 64, sizeof(float *) * nthreads);
int y0_blk0_size = block_sizes[3];
int x0_blk0_size = block_sizes[2];
int yb_size = block_sizes[1];
int xb_size = block_sizes[0];
int sf = 2;
int t_blk_size = 2 * sf * (time_M - time_m);
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
posix_memalign((void **)&r47[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
posix_memalign((void **)&r48[tid], 64, sizeof(float[x0_blk0_size + 1][y0_blk0_size + 1][z_size + 1]));
}
/* Flush denormal numbers to zero in hardware */
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
struct timeval start_section0, end_section0;
gettimeofday(&start_section0, NULL);
/* Begin section0 */
#pragma omp parallel num_threads(nthreads)
{
#pragma omp for collapse(1) schedule(static, 1)
for (int x = x_m - 1; x <= x_M; x += 1)
{
for (int y = y_m - 1; y <= y_M; y += 1)
{
#pragma omp simd aligned(delta, phi, theta : 32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
r17[x + 1][y + 1][z + 1] = sqrt(2 * delta[x + 4][y + 4][z + 4] + 1);
r18[x + 1][y + 1][z + 1] = cos(theta[x + 4][y + 4][z + 4]);
r19[x + 1][y + 1][z + 1] = sin(phi[x + 4][y + 4][z + 4]);
r20[x + 1][y + 1][z + 1] = sin(theta[x + 4][y + 4][z + 4]);
r21[x + 1][y + 1][z + 1] = cos(phi[x + 4][y + 4][z + 4]);
}
}
}
}
/* End section0 */
gettimeofday(&end_section0, NULL);
timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000;
for (int time = time_m, t1 = (time + 2) % (3), t0 = (time) % (3), t2 = (time + 1) % (3); time <= time_M; time += 1, t1 = (time + 2) % (3), t0 = (time) % (3), t2 = (time + 1) % (3))
{
int sf = 2;
int tw = ((time / sf) % (time_M - time_m + 1));
int t_blk_size = 2 * sf * (time_M - time_m);
struct timeval start_section1, end_section1;
gettimeofday(&start_section1, NULL);
/* Begin section1 */
//x_M - (x_M - x_m + 1)%(x0_blk0_size),x_m,y_M - (y_M - y_m + 1)%(y0_blk0_size),y_m,
bf0(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, u_vec, v_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, save_src_v_vec, source_id_vec, source_mask_vec, x0_blk0_size, x_size, y0_blk0_size, y_size, z_size, t0, t1, t2, x_M - (x_M - x_m + 1)%(x0_blk0_size), x_m, y_M - (y_M - y_m + 1)%(y0_blk0_size), y_m, z_M, z_m, nthreads, (float **)r47, (float **)r48, time, tw, sp_zi_m);
//bf0(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, u_vec, v_vec, vp_vec, x0_blk0_size, x_size, (y_M - y_m + 1) % (y0_blk0_size), y_size, z_size, t0, t1, t2, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads, (float **)r47, (float **)r48);
//bf0(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, u_vec, v_vec, vp_vec, (x_M - x_m + 1) % (x0_blk0_size), x_size, y0_blk0_size, y_size, z_size, t0, t1, t2, x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads, (float **)r47, (float **)r48);
//bf0(damp_vec, dt, epsilon_vec, (float *)r17, (float *)r18, (float *)r19, (float *)r20, (float *)r21, u_vec, v_vec, vp_vec, (x_M - x_m + 1) % (x0_blk0_size), x_size, (y_M - y_m + 1) % (y0_blk0_size), y_size, z_size, t0, t1, t2, x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads, (float **)r47, (float **)r48);
/* End section1 */
gettimeofday(&end_section1, NULL);
timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000;
}
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
free(r47[tid]);
free(r48[tid]);
}
free(r17);
free(r18);
free(r19);
free(r20);
free(r21);
free(r47);
free(r48);
return 0;
}
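/* Hedged aside (illustration only, not part of the generated kernel): the
   time loop above cycles t0/t1/t2 through three buffer slots so that u and
   v only ever hold three time levels in memory. A standalone sketch of the
   same rotation, with hypothetical names: */
static void example_time_rotation(int time_m, int time_M)
{
  for (int time = time_m; time <= time_M; time += 1)
  {
    int t0 = (time) % (3);     /* current level (read) */
    int t1 = (time + 2) % (3); /* previous level (read) */
    int t2 = (time + 1) % (3); /* next level (written); slot recycled next step */
    (void)t0; (void)t1; (void)t2;
  }
}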
void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict epsilon_vec, float *restrict r17_vec, float *restrict r18_vec, float *restrict r19_vec, float *restrict r20_vec, float *restrict r21_vec, struct dataobj *restrict u_vec, struct dataobj *restrict v_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict save_src_v_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int x0_blk0_size, const int x_size, const int y0_blk0_size, const int y_size, const int z_size, const int t0, const int t1, const int t2, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, float **restrict r47_vec, float **restrict r48_vec, const int time, const int tw, const int sp_zi_m)
{
float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data;
float(*restrict epsilon)[epsilon_vec->size[1]][epsilon_vec->size[2]] __attribute__((aligned(64))) = (float(*)[epsilon_vec->size[1]][epsilon_vec->size[2]])epsilon_vec->data;
float(*restrict r17)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r17_vec;
float(*restrict r18)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r18_vec;
float(*restrict r19)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r19_vec;
float(*restrict r20)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r20_vec;
float(*restrict r21)[y_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y_size + 1][z_size + 1]) r21_vec;
float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data;
float(*restrict v)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]] __attribute__((aligned(64))) = (float(*)[v_vec->size[1]][v_vec->size[2]][v_vec->size[3]])v_vec->data;
float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data;
float **r47 = (float **)r47_vec;
float **r48 = (float **)r48_vec;
int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data;
float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data;
float(*restrict save_src_v)[save_src_v_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_v_vec->size[1]])save_src_v_vec->data;
int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data;
int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data;
int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data;
if (x0_blk0_size == 0)
{
return;
}
#pragma omp parallel num_threads(nthreads)
{
const int tid = omp_get_thread_num();
float(*restrict r34)[y0_blk0_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 1][z_size + 1]) r47[tid];
float(*restrict r35)[y0_blk0_size + 1][z_size + 1] __attribute__((aligned(64))) = (float(*)[y0_blk0_size + 1][z_size + 1]) r48[tid];
#pragma omp for collapse(1) schedule(dynamic, 1)
for (int x0_blk0 = x_m; x0_blk0 <= x_M; x0_blk0 += x0_blk0_size)
{
for (int y0_blk0 = y_m; y0_blk0 <= y_M; y0_blk0 += y0_blk0_size)
{
printf(" bf Timestep: %d, Updating x0_blk0: %d y0_blk0: %d \n", time, x0_blk0, y0_blk0);
for (int x = x0_blk0 - 1, xs = 0; x <= x0_blk0 + x0_blk0_size - 1; x += 1, xs += 1)
{
for (int y = y0_blk0 - 1, ys = 0; y <= y0_blk0 + y0_blk0_size - 1; y += 1, ys += 1)
{
#pragma omp simd aligned(u, v : 32)
for (int z = z_m - 1; z <= z_M; z += 1)
{
float r39 = -u[t0][x + 4][y + 4][z + 4];
r34[xs][ys][z + 1] = 1.0e-1F * (-(r39 + u[t0][x + 4][y + 4][z + 5]) * r18[x + 1][y + 1][z + 1] - (r39 + u[t0][x + 4][y + 5][z + 4]) * r19[x + 1][y + 1][z + 1] * r20[x + 1][y + 1][z + 1] - (r39 + u[t0][x + 5][y + 4][z + 4]) * r20[x + 1][y + 1][z + 1] * r21[x + 1][y + 1][z + 1]);
float r40 = -v[t0][x + 4][y + 4][z + 4];
r35[xs][ys][z + 1] = 1.0e-1F * (-(r40 + v[t0][x + 4][y + 4][z + 5]) * r18[x + 1][y + 1][z + 1] - (r40 + v[t0][x + 4][y + 5][z + 4]) * r19[x + 1][y + 1][z + 1] * r20[x + 1][y + 1][z + 1] - (r40 + v[t0][x + 5][y + 4][z + 4]) * r20[x + 1][y + 1][z + 1] * r21[x + 1][y + 1][z + 1]);
}
}
}
for (int x = x0_blk0, xs = 0; x <= x0_blk0 + x0_blk0_size - 1; x += 1, xs += 1)
{
for (int y = y0_blk0, ys = 0; y <= y0_blk0 + y0_blk0_size - 1; y += 1, ys += 1)
{
//printf(" bf1 Timestep tw: %d, Updating x: %d y: %d , Updating xs: %d ys: %d \n", time, x + 4, y + 4, xs, ys);
#pragma omp simd aligned(damp, epsilon, u, v, vp : 32)
for (int z = z_m; z <= z_M; z += 1)
{
float r46 = 1.0 / dt;
float r45 = 1.0 / (dt * dt);
float r44 = r18[x + 1][y + 1][z] * r35[xs + 1][ys + 1][z] - r18[x + 1][y + 1][z + 1] * r35[xs + 1][ys + 1][z + 1] + r19[x + 1][y][z + 1] * r20[x + 1][y][z + 1] * r35[xs + 1][ys][z + 1] - r19[x + 1][y + 1][z + 1] * r20[x + 1][y + 1][z + 1] * r35[xs + 1][ys + 1][z + 1] + r20[x][y + 1][z + 1] * r21[x][y + 1][z + 1] * r35[xs][ys + 1][z + 1] - r20[x + 1][y + 1][z + 1] * r21[x + 1][y + 1][z + 1] * r35[xs + 1][ys + 1][z + 1];
float r43 = 1.0 / (vp[x + 4][y + 4][z + 4] * vp[x + 4][y + 4][z + 4]);
float r42 = 1.0e-1F * (-r18[x + 1][y + 1][z] * r34[xs + 1][ys + 1][z] + r18[x + 1][y + 1][z + 1] * r34[xs + 1][ys + 1][z + 1] - r19[x + 1][y][z + 1] * r20[x + 1][y][z + 1] * r34[xs + 1][ys][z + 1] + r19[x + 1][y + 1][z + 1] * r20[x + 1][y + 1][z + 1] * r34[xs + 1][ys + 1][z + 1] - r20[x][y + 1][z + 1] * r21[x][y + 1][z + 1] * r34[xs][ys + 1][z + 1] + r20[x + 1][y + 1][z + 1] * r21[x + 1][y + 1][z + 1] * r34[xs + 1][ys + 1][z + 1]) - 8.33333315e-4F * (u[t0][x + 2][y + 4][z + 4] + u[t0][x + 4][y + 2][z + 4] + u[t0][x + 4][y + 4][z + 2] + u[t0][x + 4][y + 4][z + 6] + u[t0][x + 4][y + 6][z + 4] + u[t0][x + 6][y + 4][z + 4]) + 1.3333333e-2F * (u[t0][x + 3][y + 4][z + 4] + u[t0][x + 4][y + 3][z + 4] + u[t0][x + 4][y + 4][z + 3] + u[t0][x + 4][y + 4][z + 5] + u[t0][x + 4][y + 5][z + 4] + u[t0][x + 5][y + 4][z + 4]) - 7.49999983e-2F * u[t0][x + 4][y + 4][z + 4];
float r41 = 1.0 / (r43 * r45 + r46 * damp[x + 1][y + 1][z + 1]);
float r32 = r45 * (-2.0F * u[t0][x + 4][y + 4][z + 4] + u[t1][x + 4][y + 4][z + 4]);
float r33 = r45 * (-2.0F * v[t0][x + 4][y + 4][z + 4] + v[t1][x + 4][y + 4][z + 4]);
u[t2][x + 4][y + 4][z + 4] = r41 * ((-r32) * r43 + r42 * (2 * epsilon[x + 4][y + 4][z + 4] + 1) + 1.0e-1F * r44 * r17[x + 1][y + 1][z + 1] + r46 * (damp[x + 1][y + 1][z + 1] * u[t0][x + 4][y + 4][z + 4]));
v[t2][x + 4][y + 4][z + 4] = r41 * ((-r33) * r43 + r42 * r17[x + 1][y + 1][z + 1] + 1.0e-1F * r44 + r46 * (damp[x + 1][y + 1][z + 1] * v[t0][x + 4][y + 4][z + 4]));
}
int sp_zi_M = nnz_sp_source_mask[x][y] - 1;
for (int sp_zi = sp_zi_m; sp_zi <= sp_zi_M; sp_zi += 1)
{
int zind = sp_source_mask[x][y][sp_zi];
float r22 = save_src_u[time][source_id[x][y][zind]] * source_mask[x][y][zind];
u[t2][x + 4][y + 4][zind + 4] += r22;
float r23 = save_src_v[time][source_id[x][y][zind]] * source_mask[x][y][zind];
v[t2][x + 4][y + 4][zind + 4] += r23;
}
}
}
}
}
}
} |
thread.h | #ifndef __thread_h__
#define __thread_h__
#define THREAD_QUEUE_SIZE 1024
inline void add_to_queue(int* thread_queue, int& thread_queue_size,
int* queue_next, int& queue_size_next, int vert);
inline void empty_queue(int* thread_queue, int& thread_queue_size,
int* queue_next, int& queue_size_next);
inline void add_to_queue(int* thread_queue, int& thread_queue_size,
int* queue_next, int& queue_size_next, int vert)
{
thread_queue[thread_queue_size++] = vert;
if (thread_queue_size == THREAD_QUEUE_SIZE)
empty_queue(thread_queue, thread_queue_size,
queue_next, queue_size_next);
}
inline void empty_queue(int* thread_queue, int& thread_queue_size,
int* queue_next, int& queue_size_next)
{
int start_offset;
#pragma omp atomic capture
start_offset = queue_size_next += thread_queue_size;
start_offset -= thread_queue_size;
for (int i = 0; i < thread_queue_size; ++i)
queue_next[start_offset + i] = thread_queue[i];
thread_queue_size = 0;
}
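/* Hedged usage sketch (illustration only, not part of the original header):
   a typical pattern is for each OpenMP thread to buffer vertices in a
   private queue and flush them into the shared next-frontier queue; the
   atomic capture in empty_queue reserves a contiguous slot range per flush.
   The function and parameter names below are hypothetical. */
inline void example_push_frontier(int* queue_next, int& queue_size_next,
                                  const int* verts, int num_verts)
{
  int thread_queue[THREAD_QUEUE_SIZE];
  int thread_queue_size = 0;
  for (int i = 0; i < num_verts; ++i)
    add_to_queue(thread_queue, thread_queue_size,
                 queue_next, queue_size_next, verts[i]);
  /* flush the remainder so no vertex stays in the private buffer */
  empty_queue(thread_queue, thread_queue_size, queue_next, queue_size_next);
}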
#endif |
ccsd_t.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <complex.h>
#include "config.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
typedef struct {
void *cache[6];
short a;
short b;
short c;
short _padding;
} CacheJob;
/*
* 4 * w + w.transpose(1,2,0) + w.transpose(2,0,1)
* - 2 * w.transpose(2,1,0) - 2 * w.transpose(0,2,1)
* - 2 * w.transpose(1,0,2)
*/
static void add_and_permute(double *out, double *w, double *v, int n, double fac)
{
int nn = n * n;
int nnn = nn * n;
int i, j, k;
for (i = 0; i < nnn; i++) {
v[i] *= fac;
v[i] += w[i];
}
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
for (k = 0; k < n; k++) {
out[i*nn+j*n+k] = v[i*nn+j*n+k] * 4
+ v[j*nn+k*n+i]
+ v[k*nn+i*n+j]
- v[k*nn+j*n+i] * 2
- v[i*nn+k*n+j] * 2
- v[j*nn+i*n+k] * 2;
} } }
}
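/* Hedged sanity check (illustration only, not part of the original source):
   for n = 1 all six transposes of v coincide, so the prefactors sum to
   4 + 1 + 1 - 2 - 2 - 2 = 0 and the output vanishes for any fully
   symmetric input. */
static void example_add_and_permute_n1(void)
{
    double w = 3.0, v = 0.0, out = -1.0;
    add_and_permute(&out, &w, &v, 1, 1.0); /* v becomes 3.0, out becomes 0.0 */
}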
/*
* t2T = t2.transpose(2,3,1,0)
* ov = vv_op[:,nocc:]
* oo = vv_op[:,:nocc]
* w = numpy.einsum('if,fjk->ijk', ov, t2T[c])
* w-= numpy.einsum('ijm,mk->ijk', vooo[a], t2T[c,b])
* v = numpy.einsum('ij,k->ijk', oo, t1T[c]*.5)
* v+= numpy.einsum('ij,k->ijk', t2T[b,a], fov[:,c]*.5)
* v+= w
*/
static void get_wv(double *w, double *v, double *cache,
double *fvohalf, double *vooo,
double *vv_op, double *t1Thalf, double *t2T,
int nocc, int nvir, int a, int b, int c, int *idx)
{
const double D0 = 0;
const double D1 = 1;
const double DN1 =-1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int i, j, k, n;
double *pt2T;
dgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&D1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
&D0, cache, &noo);
dgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
&DN1, t2T+c*nvoo+b*noo, &nocc, vooo+a*nooo, &nocc,
&D1, cache, &nocc);
pt2T = t2T + b * nvoo + a * noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
w[idx[n]] += cache[n];
v[idx[n]] +=(vv_op[i*nmo+j] * t1Thalf[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
} } }
}
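/* Hedged aside (illustration only): in column-major BLAS the first dgemm_
   call above computes cache[i][j][k] = sum_f ov[i][f] * t2T_c[f][j][k],
   i.e. the numpy line w = numpy.einsum('if,fjk->ijk', ov, t2T[c]) from the
   comment, where ov is the virtual block starting at vv_op + nocc. A naive
   reference loop under the same layout assumptions (hypothetical name): */
static void example_einsum_if_fjk(const double *ov, const double *t2T_c,
                                  double *w, int nocc, int nvir, int nmo)
{
    const int noo = nocc * nocc;
    for (int i = 0; i < nocc; i++) {
        for (int jk = 0; jk < noo; jk++) {
            double s = 0;
            for (int f = 0; f < nvir; f++) {
                /* ov has row stride nmo; t2T_c is (nvir, nocc, nocc),
                   with the (j,k) pair flattened into jk */
                s += ov[i*nmo + f] * t2T_c[f*noo + jk];
            }
            w[i*noo + jk] = s;
        }
    }
}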
static void sym_wv(double *w, double *v, double *cache,
double *fvohalf, double *vooo,
double *vv_op, double *t1Thalf, double *t2T,
int nocc, int nvir, int a, int b, int c, int nirrep,
int *o_ir_loc, int *v_ir_loc, int *oo_ir_loc, int *orbsym,
int *idx)
{
const double D0 = 0;
const double D1 = 1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int a_irrep = orbsym[nocc+a];
int b_irrep = orbsym[nocc+b];
int c_irrep = orbsym[nocc+c];
int ab_irrep = a_irrep ^ b_irrep;
int bc_irrep = c_irrep ^ b_irrep;
int i, j, k, n;
int fr, f0, f1, df, mr, m0, m1, dm, mk0;
int ir, i0, i1, di, kr, k0, k1, dk, jr;
int ijr, ij0, ij1, dij, jkr, jk0, jk1, djk;
double *pt2T;
/* symmetry adapted
* w = numpy.einsum('if,fjk->ijk', ov, t2T[c]) */
pt2T = t2T + c * nvoo;
for (ir = 0; ir < nirrep; ir++) {
i0 = o_ir_loc[ir];
i1 = o_ir_loc[ir+1];
di = i1 - i0;
if (di > 0) {
fr = ir ^ ab_irrep;
f0 = v_ir_loc[fr];
f1 = v_ir_loc[fr+1];
df = f1 - f0;
if (df > 0) {
jkr = fr ^ c_irrep;
jk0 = oo_ir_loc[jkr];
jk1 = oo_ir_loc[jkr+1];
djk = jk1 - jk0;
if (djk > 0) {
dgemm_(&TRANS_N, &TRANS_N, &djk, &di, &df,
&D1, pt2T+f0*noo+jk0, &noo, vv_op+i0*nmo+nocc+f0, &nmo,
&D0, cache, &djk);
for (n = 0, i = o_ir_loc[ir]; i < o_ir_loc[ir+1]; i++) {
for (jr = 0; jr < nirrep; jr++) {
kr = jkr ^ jr;
for (j = o_ir_loc[jr]; j < o_ir_loc[jr+1]; j++) {
for (k = o_ir_loc[kr]; k < o_ir_loc[kr+1]; k++, n++) {
w[idx[i*noo+j*nocc+k]] += cache[n];
} }
} }
}
}
}
}
/* symmetry adapted
* w-= numpy.einsum('ijm,mk->ijk', eris_vooo[a], t2T[c,b]) */
pt2T = t2T + c * nvoo + b * noo;
vooo += a * nooo;
mk0 = oo_ir_loc[bc_irrep];
for (mr = 0; mr < nirrep; mr++) {
m0 = o_ir_loc[mr];
m1 = o_ir_loc[mr+1];
dm = m1 - m0;
if (dm > 0) {
kr = mr ^ bc_irrep;
k0 = o_ir_loc[kr];
k1 = o_ir_loc[kr+1];
dk = k1 - k0;
if (dk > 0) {
ijr = mr ^ a_irrep;
ij0 = oo_ir_loc[ijr];
ij1 = oo_ir_loc[ijr+1];
dij = ij1 - ij0;
if (dij > 0) {
dgemm_(&TRANS_N, &TRANS_N, &dk, &dij, &dm,
&D1, pt2T+mk0, &dk, vooo+ij0*nocc+m0, &nocc,
&D0, cache, &dk);
for (n = 0, ir = 0; ir < nirrep; ir++) {
jr = ijr ^ ir;
for (i = o_ir_loc[ir]; i < o_ir_loc[ir+1]; i++) {
for (j = o_ir_loc[jr]; j < o_ir_loc[jr+1]; j++) {
for (k = o_ir_loc[kr]; k < o_ir_loc[kr+1]; k++, n++) {
w[idx[i*noo+j*nocc+k]] -= cache[n];
} }
} }
}
mk0 += dm * dk;
}
}
}
pt2T = t2T + b * nvoo + a * noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
v[idx[n]] +=(vv_op[i*nmo+j] * t1Thalf[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
} } }
}
double _ccsd_t_get_energy(double *w, double *v, double *mo_energy, int nocc,
int a, int b, int c, double fac)
{
int i, j, k, n;
double abc = mo_energy[nocc+a] + mo_energy[nocc+b] + mo_energy[nocc+c];
double et = 0;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
et += fac * w[n] * v[n] / (mo_energy[i] + mo_energy[j] + mo_energy[k] - abc);
} } }
return et;
}
static double contract6(int nocc, int nvir, int a, int b, int c,
double *mo_energy, double *t1T, double *t2T,
int nirrep, int *o_ir_loc, int *v_ir_loc,
int *oo_ir_loc, int *orbsym, double *fvo,
double *vooo, double *cache1, void **cache,
int *permute_idx, double fac)
{
int nooo = nocc * nocc * nocc;
int *idx0 = permute_idx;
int *idx1 = idx0 + nooo;
int *idx2 = idx1 + nooo;
int *idx3 = idx2 + nooo;
int *idx4 = idx3 + nooo;
int *idx5 = idx4 + nooo;
double *v0 = cache1;
double *w0 = v0 + nooo;
double *z0 = w0 + nooo;
double *wtmp = z0;
int i;
for (i = 0; i < nooo; i++) {
w0[i] = 0;
v0[i] = 0;
}
if (nirrep == 1) {
get_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c, idx0);
get_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b, idx1);
get_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c, idx2);
get_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a, idx3);
get_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b, idx4);
get_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a, idx5);
} else {
sym_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx0);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx1);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx2);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx3);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx4);
sym_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym, idx5);
}
add_and_permute(z0, w0, v0, nocc, fac);
double et;
if (a == c) {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1./6);
} else if (a == b || b == c) {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, .5);
} else {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1.);
}
return et;
}
size_t _ccsd_t_gen_jobs(CacheJob *jobs, int nocc, int nvir,
int a0, int a1, int b0, int b1,
void *cache_row_a, void *cache_col_a,
void *cache_row_b, void *cache_col_b, size_t stride)
{
size_t nov = nocc * (nocc+nvir) * stride;
int da = a1 - a0;
int db = b1 - b0;
size_t m, a, b, c;
if (b1 <= a0) {
m = 0;
for (a = a0; a < a1; a++) {
for (b = b0; b < b1; b++) {
for (c = 0; c < b0; c++, m++) {
jobs[m].a = a;
jobs[m].b = b;
jobs[m].c = c;
jobs[m].cache[0] = cache_row_a + nov*(a1*(a-a0)+b );
jobs[m].cache[1] = cache_row_a + nov*(a1*(a-a0)+c );
jobs[m].cache[2] = cache_col_a + nov*(da*(b) +a-a0);
jobs[m].cache[3] = cache_row_b + nov*(b1*(b-b0)+c );
jobs[m].cache[4] = cache_col_a + nov*(da*(c) +a-a0);
jobs[m].cache[5] = cache_col_b + nov*(db*(c) +b-b0);
}
for (c = b0; c <= b; c++, m++) {
jobs[m].a = a;
jobs[m].b = b;
jobs[m].c = c;
jobs[m].cache[0] = cache_row_a + nov*(a1*(a-a0)+b );
jobs[m].cache[1] = cache_row_a + nov*(a1*(a-a0)+c );
jobs[m].cache[2] = cache_col_a + nov*(da*(b) +a-a0);
jobs[m].cache[3] = cache_row_b + nov*(b1*(b-b0)+c );
jobs[m].cache[4] = cache_col_a + nov*(da*(c) +a-a0);
jobs[m].cache[5] = cache_row_b + nov*(b1*(c-b0)+b );
}
} }
} else {
m = 0;
for (a = a0; a < a1; a++) {
for (b = a0; b <= a; b++) {
for (c = 0; c < a0; c++, m++) {
jobs[m].a = a;
jobs[m].b = b;
jobs[m].c = c;
jobs[m].cache[0] = cache_row_a + nov*(a1*(a-a0)+b);
jobs[m].cache[1] = cache_row_a + nov*(a1*(a-a0)+c);
jobs[m].cache[2] = cache_row_a + nov*(a1*(b-a0)+a);
jobs[m].cache[3] = cache_row_a + nov*(a1*(b-a0)+c);
jobs[m].cache[4] = cache_col_a + nov*(da*(c)+a-a0);
jobs[m].cache[5] = cache_col_a + nov*(da*(c)+b-a0);
}
for (c = a0; c <= b; c++, m++) {
jobs[m].a = a;
jobs[m].b = b;
jobs[m].c = c;
jobs[m].cache[0] = cache_row_a + nov*(a1*(a-a0)+b);
jobs[m].cache[1] = cache_row_a + nov*(a1*(a-a0)+c);
jobs[m].cache[2] = cache_row_a + nov*(a1*(b-a0)+a);
jobs[m].cache[3] = cache_row_a + nov*(a1*(b-a0)+c);
jobs[m].cache[4] = cache_row_a + nov*(a1*(c-a0)+a);
jobs[m].cache[5] = cache_row_a + nov*(a1*(c-a0)+b);
}
} }
}
return m;
}
void _make_permute_indices(int *idx, int n)
{
const int nn = n * n;
const int nnn = nn * n;
int *idx0 = idx;
int *idx1 = idx0 + nnn;
int *idx2 = idx1 + nnn;
int *idx3 = idx2 + nnn;
int *idx4 = idx3 + nnn;
int *idx5 = idx4 + nnn;
int i, j, k, m;
for (m = 0, i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
for (k = 0; k < n; k++, m++) {
idx0[m] = i * nn + j * n + k;
idx1[m] = i * nn + k * n + j;
idx2[m] = j * nn + i * n + k;
idx3[m] = k * nn + i * n + j;
idx4[m] = j * nn + k * n + i;
idx5[m] = k * nn + j * n + i;
} } }
}
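/* Hedged illustration (not part of the original source): what the six
   tables encode for n = 2. Taking (i,j,k) = (0,1,0), i.e. m = 0*4+1*2+0 = 2:
   idx0[2] = 2 -> (i,j,k)   idx1[2] = 1 -> (i,k,j)   idx2[2] = 4 -> (j,i,k)
   idx3[2] = 1 -> (k,i,j)   idx4[2] = 4 -> (j,k,i)   idx5[2] = 2 -> (k,j,i)
   so scattering cache[m] through idxN accumulates the N-th transpose. */
static void example_permute_tables(void)
{
    int idx[6 * 8]; /* six tables of n*n*n entries for n = 2 */
    _make_permute_indices(idx, 2);
    /* idx + 0*8 is idx0, idx + 1*8 is idx1, ..., idx + 5*8 is idx5 */
}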
void CCsd_t_contract(double *e_tot,
double *mo_energy, double *t1T, double *t2T,
double *vooo, double *fvo,
int nocc, int nvir, int a0, int a1, int b0, int b1,
int nirrep, int *o_ir_loc, int *v_ir_loc,
int *oo_ir_loc, int *orbsym,
void *cache_row_a, void *cache_col_a,
void *cache_row_b, void *cache_col_b)
{
int da = a1 - a0;
int db = b1 - b0;
CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1,
cache_row_a, cache_col_a,
cache_row_b, cache_col_b, sizeof(double));
int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
_make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
v_ir_loc, oo_ir_loc, orbsym, vooo, fvo, jobs, e_tot, permute_idx)
{
int a, b, c;
size_t k;
double *cache1 = malloc(sizeof(double) * (nocc*nocc*nocc*3+2));
double *t1Thalf = malloc(sizeof(double) * nvir*nocc * 2);
double *fvohalf = t1Thalf + nvir*nocc;
for (k = 0; k < nvir*nocc; k++) {
t1Thalf[k] = t1T[k] * .5;
fvohalf[k] = fvo[k] * .5;
}
double e = 0;
#pragma omp for schedule (dynamic, 4)
for (k = 0; k < njobs; k++) {
a = jobs[k].a;
b = jobs[k].b;
c = jobs[k].c;
e += contract6(nocc, nvir, a, b, c, mo_energy, t1Thalf, t2T,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
fvohalf, vooo, cache1, jobs[k].cache, permute_idx,
1.0);
}
free(t1Thalf);
free(cache1);
#pragma omp critical
*e_tot += e;
}
free(jobs);
free(permute_idx);
}
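/* Hedged aside (illustration only): the per-thread accumulator plus
   "omp critical" used above is equivalent to an OpenMP reduction. A sketch
   of the same accumulation pattern with a reduction clause; the function
   and variable names are hypothetical. */
static double example_reduce_energy(const double *contrib, size_t n)
{
    double e_tot = 0;
#pragma omp parallel for schedule(dynamic, 4) reduction(+ : e_tot)
    for (size_t k = 0; k < n; k++) {
        e_tot += contrib[k];
    }
    return e_tot;
}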
void QCIsd_t_contract(double *e_tot,
double *mo_energy, double *t1T, double *t2T,
double *vooo, double *fvo,
int nocc, int nvir, int a0, int a1, int b0, int b1,
int nirrep, int *o_ir_loc, int *v_ir_loc,
int *oo_ir_loc, int *orbsym,
void *cache_row_a, void *cache_col_a,
void *cache_row_b, void *cache_col_b)
{
int da = a1 - a0;
int db = b1 - b0;
CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1,
cache_row_a, cache_col_a,
cache_row_b, cache_col_b, sizeof(double));
int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
_make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
v_ir_loc, oo_ir_loc, orbsym, vooo, fvo, jobs, e_tot, permute_idx)
{
int a, b, c;
size_t k;
double *cache1 = malloc(sizeof(double) * (nocc*nocc*nocc*3+2));
double *t1Thalf = malloc(sizeof(double) * nvir*nocc * 2);
double *fvohalf = t1Thalf + nvir*nocc;
for (k = 0; k < nvir*nocc; k++) {
t1Thalf[k] = t1T[k] * .5;
fvohalf[k] = fvo[k] * .5;
}
double e = 0;
#pragma omp for schedule (dynamic, 4)
for (k = 0; k < njobs; k++) {
a = jobs[k].a;
b = jobs[k].b;
c = jobs[k].c;
e += contract6(nocc, nvir, a, b, c, mo_energy, t1Thalf, t2T,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
fvohalf, vooo, cache1, jobs[k].cache, permute_idx,
2.0);
}
free(t1Thalf);
free(cache1);
#pragma omp critical
*e_tot += e;
}
free(jobs);
free(permute_idx);
}
/*
* Complex version of all functions
*/
static void zadd_and_permute(double complex *out, double complex *w,
double complex *v, int n, double fac)
{
int nn = n * n;
int nnn = nn * n;
int i, j, k;
for (i = 0; i < nnn; i++) {
v[i] *= fac;
v[i] += w[i];
}
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
for (k = 0; k < n; k++) {
out[i*nn+j*n+k] = v[i*nn+j*n+k] * 4
+ v[j*nn+k*n+i]
+ v[k*nn+i*n+j]
- v[k*nn+j*n+i] * 2
- v[i*nn+k*n+j] * 2
- v[j*nn+i*n+k] * 2;
} } }
}
static void zget_wv(double complex *w, double complex *v,
double complex *cache, double complex *fvohalf,
double complex *vooo, double complex *vv_op,
double complex *t1Thalf, double complex *t2T,
int nocc, int nvir, int a, int b, int c, int *idx)
{
const double complex D0 = 0;
const double complex D1 = 1;
const double complex DN1 =-1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int i, j, k, n;
double complex *pt2T;
zgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&D1, t2T+c*nvoo, &noo, vv_op+nocc, &nmo,
&D0, cache, &noo);
zgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
&DN1, t2T+c*nvoo+b*noo, &nocc, vooo+a*nooo, &nocc,
&D1, cache, &nocc);
pt2T = t2T + b * nvoo + a * noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
w[idx[n]] += cache[n];
v[idx[n]] +=(vv_op[i*nmo+j] * t1Thalf[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
} } }
}
double _ccsd_t_zget_energy(double complex *w, double complex *v,
double *mo_energy, int nocc,
int a, int b, int c, double fac)
{
int i, j, k, n;
double abc = mo_energy[nocc+a] + mo_energy[nocc+b] + mo_energy[nocc+c];
double et = 0;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
et += fac / (mo_energy[i] + mo_energy[j] + mo_energy[k] - abc) * w[n] * conj(v[n]);
} } }
return et;
}
static double complex
zcontract6(int nocc, int nvir, int a, int b, int c,
double *mo_energy, double complex *t1T, double complex *t2T,
int nirrep, int *o_ir_loc, int *v_ir_loc,
int *oo_ir_loc, int *orbsym, double complex *fvo,
double complex *vooo, double complex *cache1, void **cache,
int *permute_idx, double fac)
{
int nooo = nocc * nocc * nocc;
int *idx0 = permute_idx;
int *idx1 = idx0 + nooo;
int *idx2 = idx1 + nooo;
int *idx3 = idx2 + nooo;
int *idx4 = idx3 + nooo;
int *idx5 = idx4 + nooo;
double complex *v0 = cache1;
double complex *w0 = v0 + nooo;
double complex *z0 = w0 + nooo;
double complex *wtmp = z0;
int i;
for (i = 0; i < nooo; i++) {
w0[i] = 0;
v0[i] = 0;
}
zget_wv(w0, v0, wtmp, fvo, vooo, cache[0], t1T, t2T, nocc, nvir, a, b, c, idx0);
zget_wv(w0, v0, wtmp, fvo, vooo, cache[1], t1T, t2T, nocc, nvir, a, c, b, idx1);
zget_wv(w0, v0, wtmp, fvo, vooo, cache[2], t1T, t2T, nocc, nvir, b, a, c, idx2);
zget_wv(w0, v0, wtmp, fvo, vooo, cache[3], t1T, t2T, nocc, nvir, b, c, a, idx3);
zget_wv(w0, v0, wtmp, fvo, vooo, cache[4], t1T, t2T, nocc, nvir, c, a, b, idx4);
zget_wv(w0, v0, wtmp, fvo, vooo, cache[5], t1T, t2T, nocc, nvir, c, b, a, idx5);
zadd_and_permute(z0, w0, v0, nocc, fac);
double complex et;
if (a == c) {
et = _ccsd_t_zget_energy(w0, z0, mo_energy, nocc, a, b, c, 1./6);
} else if (a == b || b == c) {
et = _ccsd_t_zget_energy(w0, z0, mo_energy, nocc, a, b, c, .5);
} else {
et = _ccsd_t_zget_energy(w0, z0, mo_energy, nocc, a, b, c, 1.);
}
return et;
}
void CCsd_t_zcontract(double complex *e_tot,
double *mo_energy, double complex *t1T, double complex *t2T,
double complex *vooo, double complex *fvo,
int nocc, int nvir, int a0, int a1, int b0, int b1,
int nirrep, int *o_ir_loc, int *v_ir_loc,
int *oo_ir_loc, int *orbsym,
void *cache_row_a, void *cache_col_a,
void *cache_row_b, void *cache_col_b)
{
int da = a1 - a0;
int db = b1 - b0;
CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1,
cache_row_a, cache_col_a,
cache_row_b, cache_col_b,
sizeof(double complex));
int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
_make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
v_ir_loc, oo_ir_loc, orbsym, vooo, fvo, jobs, e_tot, permute_idx)
{
int a, b, c;
size_t k;
double complex *cache1 = malloc(sizeof(double complex) * (nocc*nocc*nocc*3+2));
double complex *t1Thalf = malloc(sizeof(double complex) * nvir*nocc * 2);
double complex *fvohalf = t1Thalf + nvir*nocc;
for (k = 0; k < nvir*nocc; k++) {
t1Thalf[k] = t1T[k] * .5;
fvohalf[k] = fvo[k] * .5;
}
double complex e = 0;
#pragma omp for schedule (dynamic, 4)
for (k = 0; k < njobs; k++) {
a = jobs[k].a;
b = jobs[k].b;
c = jobs[k].c;
e += zcontract6(nocc, nvir, a, b, c, mo_energy, t1Thalf, t2T,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
fvohalf, vooo, cache1, jobs[k].cache, permute_idx,
1.0);
}
free(t1Thalf);
free(cache1);
#pragma omp critical
*e_tot += e;
}
free(jobs);
free(permute_idx);
}
void QCIsd_t_zcontract(double complex *e_tot,
double *mo_energy, double complex *t1T, double complex *t2T,
double complex *vooo, double complex *fvo,
int nocc, int nvir, int a0, int a1, int b0, int b1,
int nirrep, int *o_ir_loc, int *v_ir_loc,
int *oo_ir_loc, int *orbsym,
void *cache_row_a, void *cache_col_a,
void *cache_row_b, void *cache_col_b)
{
int da = a1 - a0;
int db = b1 - b0;
CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*b1);
size_t njobs = _ccsd_t_gen_jobs(jobs, nocc, nvir, a0, a1, b0, b1,
cache_row_a, cache_col_a,
cache_row_b, cache_col_b,
sizeof(double complex));
int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
_make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
shared(njobs, nocc, nvir, mo_energy, t1T, t2T, nirrep, o_ir_loc, \
v_ir_loc, oo_ir_loc, orbsym, vooo, fvo, jobs, e_tot, permute_idx)
{
int a, b, c;
size_t k;
double complex *cache1 = malloc(sizeof(double complex) * (nocc*nocc*nocc*3+2));
double complex *t1Thalf = malloc(sizeof(double complex) * nvir*nocc * 2);
double complex *fvohalf = t1Thalf + nvir*nocc;
for (k = 0; k < nvir*nocc; k++) {
t1Thalf[k] = t1T[k] * .5;
fvohalf[k] = fvo[k] * .5;
}
double complex e = 0;
#pragma omp for schedule (dynamic, 4)
for (k = 0; k < njobs; k++) {
a = jobs[k].a;
b = jobs[k].b;
c = jobs[k].c;
e += zcontract6(nocc, nvir, a, b, c, mo_energy, t1Thalf, t2T,
nirrep, o_ir_loc, v_ir_loc, oo_ir_loc, orbsym,
fvohalf, vooo, cache1, jobs[k].cache, permute_idx,
2.0);
}
free(t1Thalf);
free(cache1);
#pragma omp critical
*e_tot += e;
}
free(jobs);
free(permute_idx);
}
/*****************************************************************************
*
* mpi4pyscf
*
*****************************************************************************/
static void MPICCget_wv(double *w, double *v, double *cache,
double *fvohalf, double *vooo,
double *vv_op, double *t1Thalf,
double *t2T_a, double *t2T_c,
int nocc, int nvir, int a, int b, int c,
int a0, int b0, int c0, int *idx)
{
const double D0 = 0;
const double D1 = 1;
const double DN1 = -1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int i, j, k, n;
double *pt2T;
dgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&D1, t2T_c+(c-c0)*nvoo, &noo, vv_op+nocc, &nmo,
&D0, cache, &noo);
dgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
&DN1, t2T_c+(c-c0)*nvoo+b*noo, &nocc, vooo+(a-a0)*nooo, &nocc,
&D1, cache, &nocc);
pt2T = t2T_a + (a-a0) * nvoo + b * noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
w[idx[n]] += cache[n];
v[idx[n]] +=(vv_op[i*nmo+j] * t1Thalf[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
} } }
}
static double MPICCcontract6(int nocc, int nvir, int a, int b, int c,
double *mo_energy, double *t1T, double *fvo,
int *slices, double **data_ptrs, double *cache1,
int *permute_idx, double fac)
{
const int a0 = slices[0];
const int a1 = slices[1];
const int b0 = slices[2];
const int b1 = slices[3];
const int c0 = slices[4];
const int c1 = slices[5];
const int da = a1 - a0;
const int db = b1 - b0;
const int dc = c1 - c0;
const int nooo = nocc * nocc * nocc;
const int nmo = nocc + nvir;
const size_t nop = nocc * nmo;
int *idx0 = permute_idx;
int *idx1 = idx0 + nooo;
int *idx2 = idx1 + nooo;
int *idx3 = idx2 + nooo;
int *idx4 = idx3 + nooo;
int *idx5 = idx4 + nooo;
double *vvop_ab = data_ptrs[0] + ((a-a0)*db+b-b0) * nop;
double *vvop_ac = data_ptrs[1] + ((a-a0)*dc+c-c0) * nop;
double *vvop_ba = data_ptrs[2] + ((b-b0)*da+a-a0) * nop;
double *vvop_bc = data_ptrs[3] + ((b-b0)*dc+c-c0) * nop;
double *vvop_ca = data_ptrs[4] + ((c-c0)*da+a-a0) * nop;
double *vvop_cb = data_ptrs[5] + ((c-c0)*db+b-b0) * nop;
double *vooo_a = data_ptrs[6];
double *vooo_b = data_ptrs[7];
double *vooo_c = data_ptrs[8];
double *t2T_a = data_ptrs[9 ];
double *t2T_b = data_ptrs[10];
double *t2T_c = data_ptrs[11];
double *v0 = cache1;
double *w0 = v0 + nooo;
double *z0 = w0 + nooo;
double *wtmp = z0;
int i;
for (i = 0; i < nooo; i++) {
w0[i] = 0;
v0[i] = 0;
}
MPICCget_wv(w0, v0, wtmp, fvo, vooo_a, vvop_ab, t1T, t2T_a, t2T_c, nocc, nvir, a, b, c, a0, b0, c0, idx0);
MPICCget_wv(w0, v0, wtmp, fvo, vooo_a, vvop_ac, t1T, t2T_a, t2T_b, nocc, nvir, a, c, b, a0, c0, b0, idx1);
MPICCget_wv(w0, v0, wtmp, fvo, vooo_b, vvop_ba, t1T, t2T_b, t2T_c, nocc, nvir, b, a, c, b0, a0, c0, idx2);
MPICCget_wv(w0, v0, wtmp, fvo, vooo_b, vvop_bc, t1T, t2T_b, t2T_a, nocc, nvir, b, c, a, b0, c0, a0, idx3);
MPICCget_wv(w0, v0, wtmp, fvo, vooo_c, vvop_ca, t1T, t2T_c, t2T_b, nocc, nvir, c, a, b, c0, a0, b0, idx4);
MPICCget_wv(w0, v0, wtmp, fvo, vooo_c, vvop_cb, t1T, t2T_c, t2T_a, nocc, nvir, c, b, a, c0, b0, a0, idx5);
add_and_permute(z0, w0, v0, nocc, fac);
double et;
if (a == c) {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1./6);
} else if (a == b || b == c) {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, .5);
} else {
et = _ccsd_t_get_energy(w0, z0, mo_energy, nocc, a, b, c, 1.);
}
return et;
}
size_t _MPICCsd_t_gen_jobs(CacheJob *jobs, int nocc, int nvir,
int *slices, double **data_ptrs)
{
const int a0 = slices[0];
const int a1 = slices[1];
const int b0 = slices[2];
const int b1 = slices[3];
const int c0 = slices[4];
const int c1 = slices[5];
size_t m, a, b, c;
m = 0;
for (a = a0; a < a1; a++) {
for (b = b0; b < MIN(b1, a+1); b++) {
for (c = c0; c < MIN(c1, b+1); c++, m++) {
jobs[m].a = a;
jobs[m].b = b;
jobs[m].c = c;
} } }
return m;
}
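/* Hedged worked example (illustration only): the MIN bounds above restrict
   the (a,b,c) jobs to the triangle a >= b >= c intersected with the given
   slices. For slices a:[2,4), b:[0,3), c:[0,3):
   a=2 -> b in {0,1,2}, c <= b : 1+2+3 = 6 jobs
   a=3 -> b in {0,1,2}, c <= b : 1+2+3 = 6 jobs
   so m returns 12. */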
void MPICCsd_t_contract(double *e_tot, double *mo_energy, double *t1T,
double *fvo, int nocc, int nvir,
int *slices, double **data_ptrs)
{
const int a0 = slices[0];
const int a1 = slices[1];
const int b0 = slices[2];
const int b1 = slices[3];
const int c0 = slices[4];
const int c1 = slices[5];
int da = a1 - a0;
int db = b1 - b0;
int dc = c1 - c0;
CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*dc);
size_t njobs = _MPICCsd_t_gen_jobs(jobs, nocc, nvir, slices, data_ptrs);
int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
_make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
shared(njobs, nocc, nvir, mo_energy, t1T, fvo, jobs, e_tot, slices, \
data_ptrs, permute_idx)
{
int a, b, c;
size_t k;
double *cache1 = malloc(sizeof(double) * (nocc*nocc*nocc*3+2));
double *t1Thalf = malloc(sizeof(double) * nvir*nocc * 2);
double *fvohalf = t1Thalf + nvir*nocc;
for (k = 0; k < nvir*nocc; k++) {
t1Thalf[k] = t1T[k] * .5;
fvohalf[k] = fvo[k] * .5;
}
double e = 0;
#pragma omp for schedule (dynamic, 4)
for (k = 0; k < njobs; k++) {
a = jobs[k].a;
b = jobs[k].b;
c = jobs[k].c;
e += MPICCcontract6(nocc, nvir, a, b, c, mo_energy, t1Thalf,
fvohalf, slices, data_ptrs, cache1,
permute_idx, 1.0);
}
free(t1Thalf);
free(cache1);
#pragma omp critical
*e_tot += e;
}
free(jobs);
free(permute_idx);
}
/*****************************************************************************
*
* pyscf periodic ccsd(t) with k-points
*
*****************************************************************************/
size_t _CCsd_t_gen_jobs_full(CacheJob *jobs, int nocc, int nvir,
int *slices)
{
const int a0 = slices[0];
const int a1 = slices[1];
const int b0 = slices[2];
const int b1 = slices[3];
const int c0 = slices[4];
const int c1 = slices[5];
size_t m, a, b, c;
m = 0;
for (a = a0; a < a1; a++) {
for (b = b0; b < b1; b++) {
for (c = c0; c < c1; c++, m++) {
jobs[m].a = a;
jobs[m].b = b;
jobs[m].c = c;
} } }
return m;
}
static void CCzget_wv(double complex *w, double complex *v, double complex *cache,
double complex *fvohalf, double complex *vooo,
double complex *vv_op, double complex *vv_op2,
double complex *t1Thalf, double complex *t2T_c1,
double complex *t2T_c2, double complex *t2T_c3,
int nocc, int nvir, int a, int b, int c,
int a0, int b0, int c0, int *idx, int bool_add_v)
{
const double complex D0 = 0;
const double complex D1 = 1;
const double complex DN1 = -1;
const char TRANS_N = 'N';
const int nmo = nocc + nvir;
const int noo = nocc * nocc;
const size_t nooo = nocc * noo;
const size_t nvoo = nvir * noo;
int i, j, k, n;
double complex *pt2T;
zgemm_(&TRANS_N, &TRANS_N, &noo, &nocc, &nvir,
&D1, t2T_c1+(c-c0)*nvoo, &noo, vv_op+nocc, &nmo,
&D0, cache, &noo);
zgemm_(&TRANS_N, &TRANS_N, &nocc, &noo, &nocc,
&DN1, t2T_c2+(c-c0)*nvoo+b*noo, &nocc, vooo+(a-a0)*nooo, &nocc,
&D1, cache, &nocc);
pt2T = t2T_c3 + (b-b0)*nvoo + a*noo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
w[idx[n]] += cache[n];
if(bool_add_v == 1){
v[idx[n]] += (vv_op2[j*nmo+i] * t1Thalf[c*nocc+k]
+ pt2T[i*nocc+j] * fvohalf[c*nocc+k]);
}
} } }
}
static void zcontract6_t3T(int nocc, int nvir, int a, int b, int c,
int *mo_offset, double complex *t3Tw,
double complex *t3Tv, double *mo_energy,
double complex *t1T, double complex *fvo, int *slices,
double complex **data_ptrs, double complex *cache1,
int *permute_idx)
{
const int a0 = slices[0];
const int a1 = slices[1];
const int b0 = slices[2];
const int b1 = slices[3];
const int c0 = slices[4];
const int c1 = slices[5];
const int da = a1 - a0;
const int db = b1 - b0;
const int dc = c1 - c0;
const int nooo = nocc * nocc * nocc;
const int nmo = nocc + nvir;
const int nop = nocc * nmo;
const int nov = nocc * nvir;
int *idx0 = permute_idx;
int *idx1 = idx0 + nooo;
int *idx2 = idx1 + nooo;
int *idx3 = idx2 + nooo;
int *idx4 = idx3 + nooo;
int *idx5 = idx4 + nooo;
int ki = mo_offset[0];
int kj = mo_offset[1];
int kk = mo_offset[2];
int ka = mo_offset[3];
int kb = mo_offset[4];
int kc = mo_offset[5];
double complex *t1T_a = t1T + ka * nov;
double complex *t1T_b = t1T + kb * nov;
double complex *t1T_c = t1T + kc * nov;
double complex *fvo_a = fvo + ka * nov;
double complex *fvo_b = fvo + kb * nov;
double complex *fvo_c = fvo + kc * nov;
double complex *vvop_ab = data_ptrs[0] + ((a-a0)*db+b-b0) * nop;
double complex *vvop_ac = data_ptrs[1] + ((a-a0)*dc+c-c0) * nop;
double complex *vvop_ba = data_ptrs[2] + ((b-b0)*da+a-a0) * nop;
double complex *vvop_bc = data_ptrs[3] + ((b-b0)*dc+c-c0) * nop;
double complex *vvop_ca = data_ptrs[4] + ((c-c0)*da+a-a0) * nop;
double complex *vvop_cb = data_ptrs[5] + ((c-c0)*db+b-b0) * nop;
double complex *vooo_aj = data_ptrs[6];
double complex *vooo_ak = data_ptrs[7];
double complex *vooo_bi = data_ptrs[8];
double complex *vooo_bk = data_ptrs[9];
double complex *vooo_ci = data_ptrs[10];
double complex *vooo_cj = data_ptrs[11];
double complex *t2T_cj = data_ptrs[12];
double complex *t2T_cb = data_ptrs[13];
double complex *t2T_bk = data_ptrs[14];
double complex *t2T_bc = data_ptrs[15];
double complex *t2T_ci = data_ptrs[16];
double complex *t2T_ca = data_ptrs[17];
double complex *t2T_ak = data_ptrs[18];
double complex *t2T_ac = data_ptrs[19];
double complex *t2T_bi = data_ptrs[20];
double complex *t2T_ba = data_ptrs[21];
double complex *t2T_aj = data_ptrs[22];
double complex *t2T_ab = data_ptrs[23];
double complex *v0 = cache1;
double complex *w0 = v0 + nooo;
double complex *z0 = w0 + nooo;
double complex *wtmp = z0;
int i, j, k, n;
int offset;
for (i = 0; i < nooo; i++) {
w0[i] = 0;
v0[i] = 0;
}
/*
* t2T = t2.transpose(2,3,1,0)
* ov = vv_op[:,nocc:]
* oo = vv_op[:,:nocc]
* w = numpy.einsum('if,fjk->ijk', ov, t2T[c])
* w-= numpy.einsum('ijm,mk->ijk', vooo[a], t2T[c,b])
* v = numpy.einsum('ij,k->ijk', oo, t1T[c]*.5)
* v+= numpy.einsum('ij,k->ijk', t2T[b,a], fov[:,c]*.5)
* v+= w
*/
CCzget_wv(w0, v0, wtmp, fvo_c, vooo_aj, vvop_ab, vvop_ba, t1T_c, t2T_cj, t2T_cb, t2T_ba,
nocc, nvir, a, b, c, a0, b0, c0, idx0, (kk==kc));
CCzget_wv(w0, v0, wtmp, fvo_b, vooo_ak, vvop_ac, vvop_ca, t1T_b, t2T_bk, t2T_bc, t2T_ca,
nocc, nvir, a, c, b, a0, c0, b0, idx1, (kj==kb));
CCzget_wv(w0, v0, wtmp, fvo_c, vooo_bi, vvop_ba, vvop_ab, t1T_c, t2T_ci, t2T_ca, t2T_ab,
nocc, nvir, b, a, c, b0, a0, c0, idx2, (kk==kc));
CCzget_wv(w0, v0, wtmp, fvo_a, vooo_bk, vvop_bc, vvop_cb, t1T_a, t2T_ak, t2T_ac, t2T_cb,
nocc, nvir, b, c, a, b0, c0, a0, idx3, (ka==ki));
CCzget_wv(w0, v0, wtmp, fvo_b, vooo_ci, vvop_ca, vvop_ac, t1T_b, t2T_bi, t2T_ba, t2T_ac,
nocc, nvir, c, a, b, c0, a0, b0, idx4, (kb==kj));
CCzget_wv(w0, v0, wtmp, fvo_a, vooo_cj, vvop_cb, vvop_bc, t1T_a, t2T_aj, t2T_ab, t2T_bc,
nocc, nvir, c, b, a, c0, b0, a0, idx5, (ka==ki));
offset = (((a-a0)*db + b-b0)*dc + c-c0)*nooo;
for (n = 0, i = 0; i < nocc; i++) {
for (j = 0; j < nocc; j++) {
for (k = 0; k < nocc; k++, n++) {
//div = 1. / (mo_energy[i+ki*nmo] + mo_energy[j+kj*nmo] + mo_energy[k+kk*nmo] - abc);
t3Tw[offset + n] = w0[n];
t3Tv[offset + n] = v0[n];
} } }
}
void CCsd_zcontract_t3T(double complex *t3Tw, double complex *t3Tv, double *mo_energy,
double complex *t1T, double complex *fvo, int nocc, int nvir, int nkpts,
int *mo_offset, int *slices, double complex **data_ptrs)
{
const int a0 = slices[0];
const int a1 = slices[1];
const int b0 = slices[2];
const int b1 = slices[3];
const int c0 = slices[4];
const int c1 = slices[5];
int da = a1 - a0;
int db = b1 - b0;
int dc = c1 - c0;
CacheJob *jobs = malloc(sizeof(CacheJob) * da*db*dc);
size_t njobs = _CCsd_t_gen_jobs_full(jobs, nocc, nvir, slices);
int *permute_idx = malloc(sizeof(int) * nocc*nocc*nocc * 6);
_make_permute_indices(permute_idx, nocc);
#pragma omp parallel default(none) \
shared(njobs, nocc, nvir, nkpts, t3Tw, t3Tv, mo_offset, mo_energy, t1T, fvo, jobs, slices, \
data_ptrs, permute_idx)
{
int a, b, c;
size_t k;
complex double *cache1 = malloc(sizeof(double complex) * (nocc*nocc*nocc*3+2));
complex double *t1Thalf = malloc(sizeof(double complex) * nkpts*nvir*nocc*2);
complex double *fvohalf = t1Thalf + nkpts*nvir*nocc;
for (k = 0; k < nkpts*nvir*nocc; k++) {
t1Thalf[k] = t1T[k] * .5;
fvohalf[k] = fvo[k] * .5;
}
#pragma omp for schedule (dynamic, 4)
for (k = 0; k < njobs; k++) {
a = jobs[k].a;
b = jobs[k].b;
c = jobs[k].c;
zcontract6_t3T(nocc, nvir, a, b, c, mo_offset, t3Tw, t3Tv, mo_energy, t1Thalf,
fvohalf, slices, data_ptrs, cache1,
permute_idx);
}
free(t1Thalf);
free(cache1);
}
free(jobs);
free(permute_idx);
}
|
sample.c | #pragma omp parallel
{
#pragma omp single
{
for (i = 0; i < N; i++) array[i] = 0;
}
// implicit synchronization (barrier)
#pragma omp for
for (i = 0; i < N; i++)
{
array[i] = i;
}
// implicit synchronization (barrier)
#pragma omp sections
{
#pragma omp section
frunc1();
#pragma omp section
frunc2();
}
// implicit synchronization (barrier)
}
// implicit synchronization (barrier) |
Example_atomic.3.c | /*
* @@name: atomic.3c
* @@type: C
* @@compilable: yes
* @@linkable: no
* @@expect: success
* @@version: omp_3.1
*/
int fetch_and_add(int *p)
{
/* Atomically read the value of *p and then increment it. The previous
 * value is returned. This can be used to implement a simple lock as
 * shown below.
 */
int old;
#pragma omp atomic capture
{ old = *p; (*p)++; }
return old;
}
/*
* Use fetch_and_add to implement a lock
*/
struct locktype {
int ticketnumber;
int turn;
};
void do_locked_work(struct locktype *lock)
{
int atomic_read(const int *p);
void work();
// Obtain the lock
int myturn = fetch_and_add(&lock->ticketnumber);
while (atomic_read(&lock->turn) != myturn)
;
// Do some work. The flush is needed to ensure visibility of
// variables not involved in atomic directives
#pragma omp flush
work();
#pragma omp flush
// Release the lock
fetch_and_add(&lock->turn);
}
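/* A possible definition of the atomic_read declared above (not part of the
 * original example): an atomic read guarantees the load of lock->turn is
 * neither torn nor hoisted out of the spin loop. */
int atomic_read(const int *p)
{
   int value;
   /* guarantee an atomic load of *p */
   #pragma omp atomic read
   value = *p;
   return value;
}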
|
PD_TV_core.c | /*
* This work is part of the Core Imaging Library developed by
* Visual Analytics and Imaging System Group of the Science Technology
* Facilities Council, STFC
*
* Copyright 2019 Daniil Kazantsev
* Copyright 2019 Srikanth Nagella, Edoardo Pasca
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "PD_TV_core.h"
/* C-OMP implementation of the Primal-Dual TV denoising/regularization model of Chambolle and Pock [1] (2D/3D case)
*
* Input Parameters:
* 1. Noisy image/volume
* 2. lambdaPar - regularization parameter
* 3. Number of iterations
* 4. epsilon: tolerance constant
* 5. lipschitz_const: convergence-related parameter
* 6. TV-type: methodTV - 'iso' (0) or 'l1' (1)
* 7. nonneg: nonnegativity constraint (0 is OFF by default, 1 is ON)
* Output:
* [1] TV - Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* [1] Antonin Chambolle, Thomas Pock. "A First-Order Primal-Dual Algorithm for Convex Problems with Applications to Imaging", 2010
*/
float PDTV_CPU_main(float *Input, float *U, float *infovector, float lambdaPar, int iterationsNumb, float epsil, float lipschitz_const, int methodTV, int nonneg, int dimX, int dimY, int dimZ)
{
int ll;
long j, DimTotal;
float re, re1, sigma, theta, lt, tau;
re = 0.0f; re1 = 0.0f;
int count = 0;
//tau = 1.0/powf(lipschitz_const,0.5);
//sigma = 1.0/powf(lipschitz_const,0.5);
tau = lambdaPar*0.1f;
sigma = 1.0f/(lipschitz_const*tau);
theta = 1.0f;
lt = tau/lambdaPar;
ll = 0;
DimTotal = (long)(dimX*dimY*dimZ);
copyIm(Input, U, (long)(dimX), (long)(dimY), (long)(dimZ));
if (dimZ <= 1) {
/*2D case */
float *U_old=NULL, *P1=NULL, *P2=NULL;
U_old = calloc(DimTotal, sizeof(float));
P1 = calloc(DimTotal, sizeof(float));
P2 = calloc(DimTotal, sizeof(float));
/* begin iterations */
for(ll=0; ll<iterationsNumb; ll++) {
/* compute the dual variable P */
DualP2D(U, P1, P2, (long)(dimX), (long)(dimY), sigma);
/* apply nonnegativity */
if (nonneg == 1) for(j=0; j<DimTotal; j++) {if (U[j] < 0.0f) U[j] = 0.0f;}
/* projection step */
Proj_func2D(P1, P2, methodTV, DimTotal);
/* copy U to U_old */
copyIm(U, U_old, (long)(dimX), (long)(dimY), 1l);
/* calculate divergence */
DivProj2D(U, Input, P1, P2,(long)(dimX), (long)(dimY), lt, tau);
/* check early stopping criteria */
if ((epsil != 0.0f) && (ll % 5 == 0)) {
re = 0.0f; re1 = 0.0f;
for(j=0; j<DimTotal; j++)
{
re += powf(U[j] - U_old[j],2);
re1 += powf(U[j],2);
}
re = sqrtf(re)/sqrtf(re1);
if (re < epsil) count++;
if (count > 3) break;
}
/*get updated solution*/
getX(U, U_old, theta, DimTotal);
}
free(P1); free(P2); free(U_old);
}
else {
/*3D case*/
float *U_old=NULL, *P1=NULL, *P2=NULL, *P3=NULL;
U_old = calloc(DimTotal, sizeof(float));
P1 = calloc(DimTotal, sizeof(float));
P2 = calloc(DimTotal, sizeof(float));
P3 = calloc(DimTotal, sizeof(float));
/* begin iterations */
for(ll=0; ll<iterationsNumb; ll++) {
/* compute the dual variable P */
DualP3D(U, P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), sigma);
/* apply nonnegativity */
if (nonneg == 1) for(j=0; j<DimTotal; j++) {if (U[j] < 0.0f) U[j] = 0.0f;}
/* projection step */
Proj_func3D(P1, P2, P3, methodTV, DimTotal);
/* copy U to U_old */
copyIm(U, U_old, (long)(dimX), (long)(dimY), (long)(dimZ));
DivProj3D(U, Input, P1, P2, P3, (long)(dimX), (long)(dimY), (long)(dimZ), lt, tau);
/* check early stopping criteria */
if ((epsil != 0.0f) && (ll % 5 == 0)) {
re = 0.0f; re1 = 0.0f;
for(j=0; j<DimTotal; j++)
{
re += powf(U[j] - U_old[j],2);
re1 += powf(U[j],2);
}
re = sqrtf(re)/sqrtf(re1);
if (re < epsil) count++;
if (count > 3) break;
}
/*get updated solution*/
getX(U, U_old, theta, DimTotal);
}
free(P1); free(P2); free(P3); free(U_old);
}
/*adding info into info_vector */
infovector[0] = (float)(ll); /*iterations number (if stopped earlier based on tolerance)*/
infovector[1] = re; /* reached tolerance */
return 0;
}
/*****************************************************************/
/************************2D-case related Functions */
/*****************************************************************/
/*Calculating dual variable (using forward differences)*/
float DualP2D(float *U, float *P1, float *P2, long dimX, long dimY, float sigma)
{
long i,j,index;
#pragma omp parallel for shared(U,P1,P2) private(index,i,j)
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
index = j*dimX+i;
/* symmetric boundary conditions (Neumann) */
if (i == dimX-1) P1[index] += sigma*(U[j*dimX+(i-1)] - U[index]);
else P1[index] += sigma*(U[j*dimX+(i+1)] - U[index]);
if (j == dimY-1) P2[index] += sigma*(U[(j-1)*dimX+i] - U[index]);
else P2[index] += sigma*(U[(j+1)*dimX+i] - U[index]);
}}
return 1;
}
/* Divergence for P dual */
float DivProj2D(float *U, float *Input, float *P1, float *P2, long dimX, long dimY, float lt, float tau)
{
long i,j,index;
float P_v1, P_v2, div_var;
#pragma omp parallel for shared(U,Input,P1,P2) private(index, i, j, P_v1, P_v2, div_var)
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
index = j*dimX+i;
/* symmetric boundary conditions (Neumann) */
if (i == 0) P_v1 = -P1[index];
else P_v1 = -(P1[index] - P1[j*dimX+(i-1)]);
if (j == 0) P_v2 = -P2[index];
else P_v2 = -(P2[index] - P2[(j-1)*dimX+i]);
div_var = P_v1 + P_v2;
U[index] = (U[index] - tau*div_var + lt*Input[index])/(1.0f + lt);
}}
return *U;
}
/*get the updated solution*/
float getX(float *U, float *U_old, float theta, long DimTotal)
{
long i;
#pragma omp parallel for shared(U,U_old) private(i)
for(i=0; i<DimTotal; i++) {
U[i] += theta*(U[i] - U_old[i]);
}
return *U;
}
/*****************************************************************/
/************************3D-case related Functions */
/*****************************************************************/
/*Calculating dual variable (using forward differences)*/
float DualP3D(float *U, float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float sigma)
{
long i,j,k,index;
#pragma omp parallel for shared(U,P1,P2,P3) private(index,i,j,k)
for(k=0; k<dimZ; k++) {
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
index = (dimX*dimY)*k + j*dimX+i;
/* symmetric boundary conditions (Neumann) */
if (i == dimX-1) P1[index] += sigma*(U[(dimX*dimY)*k + j*dimX+(i-1)] - U[index]);
else P1[index] += sigma*(U[(dimX*dimY)*k + j*dimX+(i+1)] - U[index]);
if (j == dimY-1) P2[index] += sigma*(U[(dimX*dimY)*k + (j-1)*dimX+i] - U[index]);
else P2[index] += sigma*(U[(dimX*dimY)*k + (j+1)*dimX+i] - U[index]);
if (k == dimZ-1) P3[index] += sigma*(U[(dimX*dimY)*(k-1) + j*dimX+i] - U[index]);
else P3[index] += sigma*(U[(dimX*dimY)*(k+1) + j*dimX+i] - U[index]);
}}}
return 1;
}
/* Divergence for P dual */
float DivProj3D(float *U, float *Input, float *P1, float *P2, float *P3, long dimX, long dimY, long dimZ, float lt, float tau)
{
long i,j,k,index;
float P_v1, P_v2, P_v3, div_var;
#pragma omp parallel for shared(U,Input,P1,P2,P3) private(index, i, j, k, P_v1, P_v2, P_v3, div_var)
for(k=0; k<dimZ; k++) {
for(j=0; j<dimY; j++) {
for(i=0; i<dimX; i++) {
index = (dimX*dimY)*k + j*dimX+i;
/* symmetric boundary conditions (Neumann) */
if (i == 0) P_v1 = -P1[index];
else P_v1 = -(P1[index] - P1[(dimX*dimY)*k + j*dimX+(i-1)]);
if (j == 0) P_v2 = -P2[index];
else P_v2 = -(P2[index] - P2[(dimX*dimY)*k + (j-1)*dimX+i]);
if (k == 0) P_v3 = -P3[index];
else P_v3 = -(P3[index] - P3[(dimX*dimY)*(k-1) + j*dimX+i]);
div_var = P_v1 + P_v2 + P_v3;
U[index] = (U[index] - tau*div_var + lt*Input[index])/(1.0f + lt);
}}}
return *U;
}
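/* Hypothetical usage sketch (illustrative only, not part of the library):
 * denoise a 2D image stored row-major in `noisy`. Parameter values below
 * are illustrative; calloc/free assume <stdlib.h> via PD_TV_core.h. */
static void example_denoise_2D(float *noisy, int dimX, int dimY)
{
float *denoised = calloc((size_t)dimX*(size_t)dimY, sizeof(float));
float info[2];
PDTV_CPU_main(noisy, denoised, info,
0.05f, /* lambdaPar: regularization strength */
500, /* iterationsNumb */
1e-5f, /* epsil: early-stopping tolerance */
8.0f, /* lipschitz_const */
0, /* methodTV: 0 = 'iso' */
1, /* nonneg: enforce U >= 0 */
dimX, dimY, 1); /* dimZ = 1 selects the 2D branch */
free(denoised);
}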
|
Efficient_RANSAC.h | // Copyright (c) 2015 INRIA Sophia-Antipolis (France).
// All rights reserved.
//
// This file is part of CGAL (www.cgal.org).
//
// $URL$
// $Id$
// SPDX-License-Identifier: GPL-3.0-or-later OR LicenseRef-Commercial
//
//
// Author(s) : Sven Oesau, Yannick Verdie, Clément Jamin, Pierre Alliez
//
#ifndef CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
#define CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
#include <CGAL/license/Shape_detection.h>
#include <CGAL/Random.h>
#include <CGAL/Shape_detection/Efficient_RANSAC/Octree.h>
#include <CGAL/Shape_detection/Efficient_RANSAC/Shape_base.h>
#include <CGAL/Shape_detection/Efficient_RANSAC/Plane.h>
// for octree ------------------------------
#include <boost/iterator/filter_iterator.hpp>
#include <CGAL/bounding_box.h>
#include <CGAL/Iterator_range.h>
//----------
#include <vector>
#include <cmath>
#include <limits>
#include <fstream>
#include <sstream>
#include <functional>
// boost --------------
#include <CGAL/boost/iterator/counting_iterator.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/make_shared.hpp>
//---------------------
namespace CGAL {
namespace Shape_detection {
/*!
\ingroup PkgShapeDetectionRANSAC
\brief Shape detection algorithm based on the RANSAC method.
Given a point set in 3D space with unoriented normals, sampled on surfaces,
this class detects subsets of connected points lying on the surface of primitive shapes.
Each input point is assigned to either none or at most one detected primitive
shape. The implementation follows \cgalCite{schnabel2007efficient}.
\tparam Traits must be a model of `EfficientRANSACTraits`.
*/
template <class Traits>
class Efficient_RANSAC {
public:
/// \cond SKIP_IN_MANUAL
struct Filter_unassigned_points {
Filter_unassigned_points() : m_shape_index(dummy) {}
Filter_unassigned_points(const std::vector<int> &shapeIndex)
: m_shape_index(shapeIndex) {}
bool operator()(std::size_t x) {
if (x < m_shape_index.size())
return m_shape_index[x] == -1;
else return true; // to prevent infinite incrementing
}
const std::vector<int>& m_shape_index;
std::vector<int> dummy;
};
typedef boost::filter_iterator<Filter_unassigned_points,
boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t> > Point_index_iterator;
///< iterator for indices of points.
/// \endcond
/// \name Types
/// @{
/// \cond SKIP_IN_MANUAL
typedef typename Traits::Input_range::iterator Input_iterator;
typedef typename Traits::FT FT; ///< number type.
typedef typename Traits::Point_3 Point; ///< point type.
typedef typename Traits::Vector_3 Vector; ///< vector type.
/// \endcond
typedef typename Traits::Input_range Input_range;
///< Model of the concept `Range` with random access iterators, providing input points and normals
/// through the following two property maps.
typedef typename Traits::Point_map Point_map;
///< Property map to access the location of an input point.
typedef typename Traits::Normal_map Normal_map;
///< Property map to access the unoriented normal of an input point.
typedef Shape_base<Traits> Shape; ///< Shape type.
typedef Plane<Traits> Plane_shape; ///< %Plane shape type.
#ifdef DOXYGEN_RUNNING
typedef unspecified_type Shape_range;
///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Shape>`.
typedef unspecified_type Plane_range;
///< `Iterator_range` with a bidirectional constant iterator type with value type `boost::shared_ptr<Plane_shape>`.
#else
struct Shape_range : public Iterator_range<
typename std::vector<boost::shared_ptr<Shape> >::const_iterator> {
typedef Iterator_range<
typename std::vector<boost::shared_ptr<Shape> >::const_iterator> Base;
Shape_range(boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
extracted_shapes) : Base(make_range(extracted_shapes->begin(),
extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {}
private:
boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > >
m_extracted_shapes; // keeps a reference to the shape vector
};
struct Plane_range : public Iterator_range<
typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> {
typedef Iterator_range<
typename std::vector<boost::shared_ptr<Plane_shape> >::const_iterator> Base;
Plane_range(boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > >
extracted_shapes) : Base(make_range(extracted_shapes->begin(),
extracted_shapes->end())), m_extracted_shapes(extracted_shapes) {}
private:
boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > >
m_extracted_shapes; // keeps a reference to the shape vector
};
#endif
#ifdef DOXYGEN_RUNNING
typedef unspecified_type Point_index_range;
///< `Iterator_range` with a bidirectional iterator with value type `std::size_t`
/// as indices into the input data that has not been assigned to a shape.
/// As this range class has no `size()` method, the method
/// `Efficient_RANSAC::number_of_unassigned_points()` is provided.
#else
typedef Iterator_range<Point_index_iterator>
Point_index_range;
#endif
/// @}
/// \name Parameters
/// @{
/*!
Parameters for the shape detection algorithm. They are explained in detail
in Section \ref Shape_detection_RANSACParameters of the User Manual.
*/
struct Parameters {
Parameters()
: probability((FT) 0.01)
, min_points((std::numeric_limits<std::size_t>::max)())
, epsilon(-1)
, normal_threshold((FT) 0.9)
, cluster_epsilon(-1)
{}
/*!
Probability to control search endurance.
%Default value is 0.01.
A lower probability provides a higher reliability and determinism at the cost
of longer running time due to a higher search endurance.
It must belong to the interval [0, 1].
*/
FT probability;
/*!
Minimum number of points in a shape.
%Default value is 1% of total number of input points.
It must belong to the interval [0, +inf).
*/
std::size_t min_points;
/*!
Maximum acceptable Euclidean distance between a point and a shape.
%Default value is 1% of the bounding box diagonal.
It must belong to the interval [0, +inf).
*/
FT epsilon;
/*!
Minimum dot product between the estimated shape's normal and the point's normal,
i.e. the cosine of the maximum accepted normal deviation (cos(25°) ≈ 0.9).
%Default value is 0.9 (around 25 degrees).
It must belong to the interval [0, 1].
*/
FT normal_threshold;
/*!
Maximum acceptable Euclidean distance between points that are assumed to be neighbors.
%Default value is 1% of the bounding box diagonal.
It must belong to the interval [0, +inf).
*/
FT cluster_epsilon;
};
/// @}
private:
typedef internal::Octree<internal::DirectPointAccessor<Traits> >
Direct_octree;
typedef internal::Octree<internal::IndexedPointAccessor<Traits> >
Indexed_octree;
//--------------------------------------------typedef
// Creates a function pointer for instantiating shapes.
template <class ShapeT>
static Shape *factory() {
return new ShapeT;
}
public:
/// \name Initialization
/// @{
/*!
Constructs an empty shape detection object.
*/
Efficient_RANSAC(Traits t = Traits())
: m_traits(t)
, m_direct_octrees(nullptr)
, m_global_octree(nullptr)
, m_num_subsets(0)
, m_num_available_points(0)
, m_num_total_points(0)
, m_valid_iterators(false)
{}
/*!
Releases all memory allocated by this instance including shapes.
*/
~Efficient_RANSAC() {
clear();
}
/*!
Retrieves the traits class.
*/
const Traits&
traits() const
{
return m_traits;
}
/*!
Retrieves the point property map.
*/
const Point_map& point_map() const { return m_point_pmap; }
/*!
Retrieves the normal property map.
*/
const Normal_map& normal() const { return m_normal_pmap; }
Input_iterator input_iterator_first() const
{
return m_input_iterator_first;
}
Input_iterator input_iterator_beyond() const
{
return m_input_iterator_beyond;
}
/*!
Sets the input data. The range must stay valid
until the detection has been performed and the access to the
results is no longer required. The data in the input is reordered by the methods
`detect()` and `preprocess()`. This function first calls `clear()`.
*/
void set_input(
Input_range& input_range,
///< Range of input data.
Point_map point_map = Point_map(),
///< Property map to access the position of an input point.
Normal_map normal_map = Normal_map()
///< Property map to access the normal of an input point.
) {
m_point_pmap = point_map;
m_normal_pmap = normal_map;
m_input_iterator_first = input_range.begin();
m_input_iterator_beyond = input_range.end();
clear();
m_extracted_shapes =
boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
m_num_available_points = m_num_total_points = std::distance(
m_input_iterator_first, m_input_iterator_beyond);
m_valid_iterators = true;
}
/*!
Registers the shape type `Shape_type`, which must inherit from `Shape_base`, in the detection engine.
For example, for registering a plane as detectable shape, you should call
`ransac.add_shape_factory< Shape_detection::Plane<Traits> >();`. Note
that if your call is within a template, you should add the `template`
keyword just before `add_shape_factory`:
`ransac.template add_shape_factory< Shape_detection::Plane<Traits> >();`.
*/
template <class Shape_type>
void add_shape_factory() {
m_shape_factories.push_back(factory<Shape_type>);
}
/*!
Constructs internal data structures required for the shape detection.
These structures only depend on the input data, i.e. the points and
normal vectors. This method is called by `detect()`, if it was not called
before by the user.
*/
bool preprocess() {
if (m_num_total_points == 0)
return false;
// Generation of subsets
m_num_subsets = (std::size_t)(std::max<std::ptrdiff_t>)((std::ptrdiff_t)
std::floor(std::log(double(m_num_total_points))/std::log(2.))-9, 2);
// SUBSET GENERATION ->
// approach with increasing subset sizes -> replace with octree later on
Input_iterator last = m_input_iterator_beyond - 1;
std::size_t remainingPoints = m_num_total_points;
m_available_octree_sizes.resize(m_num_subsets);
m_direct_octrees = new Direct_octree *[m_num_subsets];
for (int s = int(m_num_subsets) - 1;s >= 0;--s) {
std::size_t subsetSize = remainingPoints;
std::vector<std::size_t> indices(subsetSize);
if (s) {
subsetSize >>= 1;
for (std::size_t i = 0;i<subsetSize;i++) {
std::size_t index = get_default_random()(2);
index = index + (i<<1);
index = (index >= remainingPoints) ? remainingPoints - 1 : index;
indices[i] = index;
}
// move points to the end of the point vector
std::size_t j = subsetSize;
do {
j--;
typename std::iterator_traits<Input_iterator>::value_type
tmp = (*last);
*last = m_input_iterator_first[indices[std::size_t(j)]];
m_input_iterator_first[indices[std::size_t(j)]] = tmp;
last--;
} while (j > 0);
m_direct_octrees[s] = new Direct_octree(
m_traits, last + 1,
last + subsetSize + 1,
m_point_pmap, m_normal_pmap,
remainingPoints - subsetSize);
}
else
m_direct_octrees[0] = new Direct_octree(
m_traits, m_input_iterator_first,
m_input_iterator_first + (subsetSize),
m_point_pmap, m_normal_pmap,
0);
m_available_octree_sizes[s] = subsetSize;
m_direct_octrees[s]->createTree(m_options.cluster_epsilon);
remainingPoints -= subsetSize;
}
m_global_octree = new Indexed_octree(
m_traits, m_input_iterator_first, m_input_iterator_beyond,
m_point_pmap, m_normal_pmap);
m_global_octree->createTree(m_options.cluster_epsilon);
return true;
}
/// @}
/// \name Memory Management
/// @{
/*!
Removes all shape types registered for detection.
*/
void clear_shape_factories() {
m_shape_factories.clear();
}
/*!
Frees memory allocated for the internal search structures but keeps the detected shapes.
It invalidates the range retrieved using `unassigned_points()`.
*/
void clear_octrees() {
// If there is no data yet, there are no data structures.
if (!m_valid_iterators)
return;
if (m_global_octree) {
delete m_global_octree;
m_global_octree = nullptr;
}
if (m_direct_octrees) {
for (std::size_t i = 0;i<m_num_subsets;i++)
delete m_direct_octrees[i];
delete [] m_direct_octrees;
m_direct_octrees = nullptr;
}
m_num_subsets = 0;
}
/*!
Calls `clear_octrees()` and removes all detected shapes.
All internal structures are cleaned, including formerly detected shapes.
Thus iterators and ranges retrieved through `shapes()`, `planes()` and `indices_of_unassigned_points()`
are invalidated.
*/
void clear() {
// If there is no data yet, there are no data structures.
if (!m_valid_iterators)
return;
std::vector<int>().swap(m_shape_index);
m_extracted_shapes =
boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
m_num_available_points = m_num_total_points;
clear_octrees();
clear_shape_factories();
}
/// @}
/// \name Detection
/// @{
/*!
Performs the shape detection. Shape types considered during the detection
are those registered using `add_shape_factory()`.
\param options parameters for shape detection
\param callback can be omitted if the algorithm should be run
without any callback. It is called regularly when the algorithm
is running: the current advancement (between 0.0 and 1.0) is
passed as parameter. If it returns `true`, then the algorithm
continues its execution normally; if it returns `false`, the
algorithm is stopped. Note that this interruption may leave the
class in an invalid state.
\return `true` if shape types have been registered and
input data has been set. Otherwise, `false` is returned.
*/
bool detect(const Parameters &options = Parameters(),
const std::function<bool(double)>& callback
= std::function<bool(double)>())
{
m_options = options;
// No shape types for detection or no points provided, exit
if (m_shape_factories.size() == 0 ||
(m_input_iterator_beyond - m_input_iterator_first) == 0)
return false;
if (m_num_subsets == 0 || m_global_octree == nullptr) {
if (!preprocess())
return false;
}
if (callback && !callback(0.))
return false;
// Reset data structures possibly used by former search
m_extracted_shapes =
boost::make_shared<std::vector<boost::shared_ptr<Shape> > >();
m_num_available_points = m_num_total_points;
for (std::size_t i = 0;i<m_num_subsets;i++) {
m_available_octree_sizes[i] = m_direct_octrees[i]->size();
}
// Use bounding box diagonal as reference for default values
Bbox_3 bbox = m_global_octree->boundingBox();
FT bbox_diagonal = (FT) CGAL::sqrt(
(bbox.xmax() - bbox.xmin()) * (bbox.xmax() - bbox.xmin())
+ (bbox.ymax() - bbox.ymin()) * (bbox.ymax() - bbox.ymin())
+ (bbox.zmax() - bbox.zmin()) * (bbox.zmax() - bbox.zmin()));
// Epsilon or cluster_epsilon have been set by the user?
// If not, derive from bounding box diagonal
m_options.epsilon = (m_options.epsilon < 0)
? bbox_diagonal * (FT) 0.01 : m_options.epsilon;
m_options.cluster_epsilon = (m_options.cluster_epsilon < 0)
? bbox_diagonal * (FT) 0.01 : m_options.cluster_epsilon;
// Minimum number of points has been set?
m_options.min_points =
(m_options.min_points == (std::numeric_limits<std::size_t>::max)()) ?
(std::size_t)((FT)0.01 * m_num_available_points) :
m_options.min_points;
m_options.min_points = (m_options.min_points < 10) ? 10 : m_options.min_points;
// Initializing the shape index
m_shape_index.assign(m_num_available_points, -1);
if (m_options.min_points > m_num_available_points)
return true;
// List of all randomly drawn candidates
// with the minimum number of points
std::vector<Shape *> candidates;
// Identifying minimum number of samples
m_required_samples = 0;
for (std::size_t i = 0;i<m_shape_factories.size();i++) {
Shape *tmp = (Shape *) m_shape_factories[i]();
m_required_samples = (std::max<std::size_t>)(m_required_samples, tmp->minimum_sample_size());
delete tmp;
}
std::size_t first_sample; // first sample for RANSAC
FT best_expected = 0;
// number of points that have been assigned to a shape
std::size_t num_invalid = 0;
std::size_t generated_candidates = 0;
std::size_t failed_candidates = 0;
std::size_t limit_failed_candidates = (std::max)(std::size_t(10000),
std::size_t(m_input_iterator_beyond
- m_input_iterator_first)
/ std::size_t(100));
bool force_exit = false;
bool keep_searching = true;
do { // main loop
best_expected = 0;
if (keep_searching)
do {
// Search (remaining_points / min_points) shapes (max 200 per iteration, min 1)
std::size_t search_number
= (std::min)(std::size_t(200),
(std::max)(std::size_t((m_num_available_points - num_invalid) / double(m_options.min_points)),
std::size_t(1)));
for (std::size_t nb = 0; nb < search_number; ++ nb)
{
// Generate candidates
//1. pick a point p1 randomly among available points
std::set<std::size_t> indices;
bool done = false;
do {
do
first_sample = get_default_random()(
static_cast<unsigned int>(m_num_available_points));
while (m_shape_index[first_sample] != -1);
done = m_global_octree->drawSamplesFromCellContainingPoint(
get(m_point_pmap,
*(m_input_iterator_first + first_sample)),
select_random_octree_level(),
indices,
m_shape_index,
m_required_samples);
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
} while (m_shape_index[first_sample] != -1 || !done);
generated_candidates++;
//add candidate for each type of primitives
for(typename std::vector<Shape *(*)()>::iterator it =
m_shape_factories.begin(); it != m_shape_factories.end(); it++) {
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
Shape *p = (Shape *) (*it)();
//compute the primitive and check whether the candidate is valid
p->compute(indices,
m_input_iterator_first,
m_traits,
m_point_pmap,
m_normal_pmap,
m_options.epsilon,
m_options.normal_threshold);
if (p->is_valid()) {
improve_bound(p, m_num_available_points - num_invalid, 1, 500);
//evaluate the candidate
if(p->max_bound() >= m_options.min_points && p->score() > 0) {
if (best_expected < p->expected_value())
best_expected = p->expected_value();
candidates.push_back(p);
}
else {
failed_candidates++;
delete p;
}
}
else {
failed_candidates++;
delete p;
}
}
}
if (failed_candidates >= limit_failed_candidates)
{
force_exit = true;
}
keep_searching = (stop_probability(m_options.min_points,
m_num_available_points - num_invalid,
generated_candidates, m_global_octree->maxLevel())
> m_options.probability);
} while( !force_exit
&& stop_probability((std::size_t) best_expected,
m_num_available_points - num_invalid,
generated_candidates,
m_global_octree->maxLevel())
> m_options.probability
&& keep_searching);
// end of generate candidate
if (force_exit) {
break;
}
if (candidates.empty())
continue;
// Now get the best candidate in the current set of all candidates
// Note that the function sorts the candidates:
// the best candidate is always the last element of the vector
Shape *best_candidate =
get_best_candidate(candidates, m_num_available_points - num_invalid);
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
if (!best_candidate)
continue;
// If search is done and the best candidate is too small, we are done.
if (!keep_searching && best_candidate->m_score < m_options.min_points)
break;
best_candidate->m_indices.clear();
best_candidate->m_score =
m_global_octree->score(best_candidate,
m_shape_index,
FT(3) * m_options.epsilon,
m_options.normal_threshold);
best_expected = static_cast<FT>(best_candidate->m_score);
best_candidate->connected_component(best_candidate->m_indices,
m_options.cluster_epsilon);
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
// check score against min_points and clear out candidates if too low
if (best_candidate->indices_of_assigned_points().size() <
m_options.min_points)
{
if (!(best_candidate->indices_of_assigned_points().empty()))
for (std::size_t i = 0;i < candidates.size() - 1;i++) {
if (best_candidate->is_same(candidates[i])) {
delete candidates[i];
candidates[i] = nullptr;
}
}
candidates.back() = nullptr;
delete best_candidate;
best_candidate = nullptr;
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
// Trimming candidates list
std::size_t empty = 0, occupied = 0;
while (empty < candidates.size()) {
while (empty < candidates.size() && candidates[empty]) empty++;
if (empty >= candidates.size())
break;
if (occupied < empty)
occupied = empty + 1;
while (occupied < candidates.size() && !candidates[occupied])
occupied++;
if (occupied >= candidates.size())
break;
candidates[empty] = candidates[occupied];
candidates[occupied] = nullptr;
empty++;
occupied++;
}
candidates.resize(empty);
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
}
else
if (stop_probability((std::size_t) best_candidate->expected_value(),
(m_num_available_points - num_invalid),
generated_candidates,
m_global_octree->maxLevel())
<= m_options.probability) {
// Remove candidate from list
candidates.back() = nullptr;
//1. add best candidate to final result.
m_extracted_shapes->push_back(
boost::shared_ptr<Shape>(best_candidate));
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
//2. remove the points
const std::vector<std::size_t> &indices_points_best_candidate =
best_candidate->indices_of_assigned_points();
// update generated candidates to reflect removal of points
generated_candidates = std::size_t(std::pow (1.f - (indices_points_best_candidate.size() /
float(m_num_available_points - num_invalid)), 3.f)
* generated_candidates);
//2.3 Remove the points from the subtrees
for (std::size_t i = 0;i<indices_points_best_candidate.size();i++) {
m_shape_index[indices_points_best_candidate.at(i)] =
int(m_extracted_shapes->size()) - 1;
num_invalid++;
for (std::size_t j = 0;j<m_num_subsets;j++) {
if (m_direct_octrees[j] && m_direct_octrees[j]->m_root) {
std::size_t offset = m_direct_octrees[j]->offset();
if (offset <= indices_points_best_candidate.at(i) &&
(indices_points_best_candidate.at(i) - offset)
< m_direct_octrees[j]->size()) {
m_available_octree_sizes[j]--;
}
}
}
}
failed_candidates = 0;
best_expected = 0;
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
std::vector<std::size_t> subset_sizes(m_num_subsets);
subset_sizes[0] = m_available_octree_sizes[0];
for (std::size_t i = 1;i<m_num_subsets;i++) {
subset_sizes[i] = subset_sizes[i-1] + m_available_octree_sizes[i];
}
//3. Remove points from candidates common with extracted primitive
//#pragma omp parallel for
best_expected = 0;
for (std::size_t i=0;i< candidates.size()-1;i++) {
if (candidates[i]) {
candidates[i]->update_points(m_shape_index);
candidates[i]->compute_bound(
subset_sizes[candidates[i]->m_nb_subset_used - 1],
m_num_available_points - num_invalid);
if (candidates[i]->max_bound() < m_options.min_points) {
delete candidates[i];
candidates[i] = nullptr;
}
else {
best_expected = (candidates[i]->expected_value() > best_expected) ?
candidates[i]->expected_value() : best_expected;
}
}
}
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
std::size_t start = 0, end = candidates.size() - 1;
while (start < end) {
while (candidates[start] && start < end) start++;
while (!candidates[end] && start < end) end--;
if (!candidates[start] && candidates[end] && start < end) {
candidates[start] = candidates[end];
candidates[end] = nullptr;
start++;
end--;
}
}
if (candidates[end]) end++;
candidates.resize(end);
}
else if (!keep_searching)
++ generated_candidates;
if (callback && !callback(num_invalid / double(m_num_total_points)))
return false;
keep_searching = (stop_probability(m_options.min_points,
m_num_available_points - num_invalid,
generated_candidates,
m_global_octree->maxLevel())
> m_options.probability);
}
while((keep_searching
&& FT(m_num_available_points - num_invalid) >= m_options.min_points)
|| best_expected >= m_options.min_points);
// Clean up remaining candidates.
for (std::size_t i = 0;i<candidates.size();i++)
delete candidates[i];
candidates.resize(0);
m_num_available_points -= num_invalid;
return true;
}
/// @}
/// \name Access
/// @{
/*!
Returns an `Iterator_range` with a bidirectional iterator with value type
`boost::shared_ptr<Shape>` over the detected shapes in the order of detection.
Depending on the chosen probability
for the detection, the shapes are ordered with decreasing size.
*/
Shape_range shapes() const {
return Shape_range(m_extracted_shapes);
}
/*!
Returns an `Iterator_range` with a bidirectional iterator with
value type `boost::shared_ptr<Plane_shape>` over only the
detected planes in the order of detection. Depending on the
chosen probability for the detection, the planes are ordered
with decreasing size.
*/
Plane_range planes() const {
boost::shared_ptr<std::vector<boost::shared_ptr<Plane_shape> > > planes
= boost::make_shared<std::vector<boost::shared_ptr<Plane_shape> > >();
for (std::size_t i = 0; i < m_extracted_shapes->size(); ++ i)
{
boost::shared_ptr<Plane_shape> pshape
= boost::dynamic_pointer_cast<Plane_shape>((*m_extracted_shapes)[i]);
// Ignore all shapes other than plane
if (pshape != boost::shared_ptr<Plane_shape>())
planes->push_back (pshape);
}
return Plane_range(planes);
}
/*!
Number of points not assigned to a shape.
*/
std::size_t number_of_unassigned_points() const {
return m_num_available_points;
}
/*!
Returns an `Iterator_range` with a bidirectional iterator with value type `std::size_t`
as indices into the input data that has not been assigned to a shape.
*/
Point_index_range indices_of_unassigned_points() {
Filter_unassigned_points fup(m_shape_index);
Point_index_iterator p1 =
boost::make_filter_iterator<Filter_unassigned_points>(
fup,
boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t>(0),
boost::counting_iterator<std::size_t, boost::use_default, std::ptrdiff_t>(m_shape_index.size()));
return make_range(p1, Point_index_iterator(p1.end()));
}
/// @}
private:
int select_random_octree_level() {
return (int) get_default_random()(
static_cast<unsigned int>(m_global_octree->maxLevel() + 1));
}
Shape* get_best_candidate(std::vector<Shape* >& candidates,
const std::size_t num_available_points) {
if (candidates.size() == 1)
return candidates.back();
int index_worse_candidate = 0;
bool improved = true;
while (index_worse_candidate < (int)candidates.size() - 1 && improved) {
improved = false;
typename Shape::Compare_by_max_bound comp;
std::sort(candidates.begin() + index_worse_candidate,
candidates.end(),
comp);
//refine the best one
improve_bound(candidates.back(),
num_available_points, m_num_subsets,
m_options.min_points);
int position_stop;
//Take all those intersecting the best one, check for equal ones
for (position_stop = int(candidates.size()) - 1;
position_stop > index_worse_candidate;
position_stop--) {
if (candidates.back()->min_bound() >
candidates.at(position_stop)->max_bound())
break; //the intervals do not overlap anymore
if (candidates.at(position_stop)->max_bound()
<= m_options.min_points)
break; //the following candidate doesn't have enough points!
//if we reach this point, there is an overlap
// between best one and position_stop
//so request refining bound on position_stop
improved |= improve_bound(candidates.at(position_stop),
num_available_points,
m_num_subsets,
m_options.min_points);
//test again after refined
if (candidates.back()->min_bound() >
candidates.at(position_stop)->max_bound())
break; //the intervals do not overlap anymore
}
index_worse_candidate = position_stop;
}
return candidates.back();
}
bool improve_bound(Shape *candidate,
std::size_t num_available_points,
std::size_t max_subset,
std::size_t min_points) {
if (candidate->m_nb_subset_used >= max_subset)
return false;
if (candidate->m_nb_subset_used >= m_num_subsets)
return false;
candidate->m_nb_subset_used =
(candidate->m_nb_subset_used >= m_num_subsets) ?
m_num_subsets - 1 : candidate->m_nb_subset_used;
// Add another subset and recompute the lower and upper bounds;
// the next subset to include is given by m_nb_subset_used.
std::size_t num_points_evaluated = 0;
for (std::size_t i=0;i<candidate->m_nb_subset_used;i++)
num_points_evaluated += m_available_octree_sizes[i];
// need score of new subset as well as sum of
// the score of the previous considered subset
std::size_t new_score = 0;
std::size_t new_sampled_points = 0;
do {
new_score = m_direct_octrees[candidate->m_nb_subset_used]->score(
candidate,
m_shape_index,
m_options.epsilon,
m_options.normal_threshold);
candidate->m_score += new_score;
num_points_evaluated +=
m_available_octree_sizes[candidate->m_nb_subset_used];
new_sampled_points +=
m_available_octree_sizes[candidate->m_nb_subset_used];
candidate->m_nb_subset_used++;
} while (new_sampled_points < min_points &&
candidate->m_nb_subset_used < m_num_subsets);
candidate->m_score = candidate->m_indices.size();
candidate->compute_bound(num_points_evaluated, num_available_points);
return true;
}
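// Probability that, after num_candidates random draws, a candidate of size
// largest_candidate has been missed every time. Each draw samples a cell at
// one of the (octree_depth+1) levels, so a given shape is hit with
// probability roughly largest_candidate /
// (num_pts * (octree_depth+1) * 2^(m_required_samples-1)),
// following \cgalCite{schnabel2007efficient}.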
inline FT stop_probability(std::size_t largest_candidate, std::size_t num_pts, std::size_t num_candidates, std::size_t octree_depth) const {
return (std::min<FT>)(std::pow((FT) 1.f - (FT) largest_candidate
/ (FT(num_pts) * (octree_depth+1) * (1 << (m_required_samples - 1))), (int) num_candidates), (FT) 1);
}
private:
Parameters m_options;
// Traits class.
Traits m_traits;
// Octrees build on input data for quick shape evaluation and
// sample selection within an octree cell.
Direct_octree **m_direct_octrees;
Indexed_octree *m_global_octree;
std::vector<std::size_t> m_available_octree_sizes;
std::size_t m_num_subsets;
// maps index into points to assigned extracted primitive
std::vector<int> m_shape_index;
std::size_t m_num_available_points;
std::size_t m_num_total_points;
std::size_t m_required_samples;
//give the index of the subset of point i
std::vector<int> m_index_subsets;
boost::shared_ptr<std::vector<boost::shared_ptr<Shape> > > m_extracted_shapes;
std::vector<Shape *(*)()> m_shape_factories;
// iterators of input data
bool m_valid_iterators;
Input_iterator m_input_iterator_first, m_input_iterator_beyond;
Point_map m_point_pmap;
Normal_map m_normal_pmap;
};
}
}
#endif // CGAL_SHAPE_DETECTION_EFFICIENT_RANSAC_H
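// Minimal usage sketch (illustrative only; assumes the standard CGAL kernel
// and the Efficient_RANSAC_traits helper from the Shape_detection package):
//
//   typedef CGAL::Exact_predicates_inexact_constructions_kernel Kernel;
//   typedef std::pair<Kernel::Point_3, Kernel::Vector_3> Point_with_normal;
//   typedef std::vector<Point_with_normal> Pwn_vector;
//   typedef CGAL::Shape_detection::Efficient_RANSAC_traits<Kernel,
//             Pwn_vector,
//             CGAL::First_of_pair_property_map<Point_with_normal>,
//             CGAL::Second_of_pair_property_map<Point_with_normal> > Traits;
//
//   Pwn_vector points = /* load points with unoriented normals */;
//   CGAL::Shape_detection::Efficient_RANSAC<Traits> ransac;
//   ransac.set_input(points);
//   ransac.add_shape_factory<CGAL::Shape_detection::Plane<Traits> >();
//   ransac.detect();
//   for (const auto& shape : ransac.shapes())
//     ; // shape is a boost::shared_ptr<Shape_base<Traits> >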
|
Integration.h | #ifndef Integration_h
#define Integration_h
void make_hist(double value,double *hist)
{
int index;
double in=(value-Low)*Hset;
index=(int)in;
if (index>=0 && index < HSize) {
/* hist is shared across threads when called from MCStep's parallel loop */
#pragma omp atomic
hist[index]++;
}
}
void Output_hist(double *hist)
{
double x;
FILE *fp;
fp=fopen("Hist.dat","w");
int i;
for (i=0; i<HSize; i++) {
x=(double)i/Hset+Low;
fprintf(fp,"%lf %lf\n",x,hist[i]/(double)Bin);
}
fclose(fp);
}
void MCStep(double *average,double *sum_of_squares,double *hist)
{
double a=0,s=0;
int i;
#pragma omp parallel num_threads(THREADS)
{
sfmt_t sfmt;
sfmt_init_gen_rand(&sfmt, (unsigned int)time(NULL)+omp_get_thread_num());
uint64_t *array;
int k=0; /* per-thread cursor into the random buffer; each iteration consumes two uniforms, so Array_size must be at least 2*Bin/THREADS */
array = malloc(sizeof(uint64_t)*Array_size);
sfmt_fill_array64(&sfmt, array, Array_size);
srand((unsigned)time(NULL)+omp_get_thread_num()); /* seeds the commented-out rand() fallback below */
#pragma omp for reduction(+:a,s)
for (i=0; i<Bin; i++) {
double x=sqrt(-2.0*log(sfmt_to_res53(array[k++])));
double y=PPI*sfmt_to_res53(array[k++]);
//double x=sqrt(-2.0*log((double)rand()/RAND_MAX));
//double y=PPI*(double)rand()/RAND_MAX;
double value=x*cos(y);
//double value=S_A(array,&k);
//x*sin(y);
a+=value;
s+=(value*value);
make_hist(value,hist);
//printf("%lf %lf %lf\n",value,x,y);
}
free(array);
}
/* a and s hold the final reduction results once the parallel region ends */
*average=a/(double)Bin;
*sum_of_squares=s/(double)Bin;
}
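/* For reference, a minimal self-contained sketch of the Box-Muller transform
 * used in MCStep: two independent uniforms u1, u2 in (0,1) map to a standard
 * normal sample. PPI above is assumed to be 2*pi; this helper is added for
 * illustration only and is not called by the code. */
static inline double box_muller(double u1, double u2)
{
return sqrt(-2.0*log(u1)) * cos(6.283185307179586*u2); /* 2*pi*u2 */
}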
#endif
|
attribute.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE %
% A A T T R R I B B U U T E %
% AAAAA T T RRRR I BBBB U U T EEE %
% A A T T R R I B B U U T E %
% A A T T R R IIIII BBBB UUU T EEEEE %
% %
% %
% MagickCore Get / Set Image Attributes %
% %
% Software Design %
% Cristy %
% October 2002 %
% %
% %
% Copyright 1999-2014 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/channel.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colormap-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/effect.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/identify.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/memory_.h"
#include "magick/magick.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/segment.h"
#include "magick/splay-tree.h"
#include "magick/string_.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e B o u n d i n g B o x %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageBoundingBox() returns the bounding box of an image canvas.
%
% The format of the GetImageBoundingBox method is:
%
% RectangleInfo GetImageBoundingBox(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o bounds: Method GetImageBoundingBox returns the bounding box of an
% image canvas.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickPixelPacket
target[3],
zero;
RectangleInfo
bounds;
register const PixelPacket
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
bounds.width=0;
bounds.height=0;
bounds.x=(ssize_t) image->columns;
bounds.y=(ssize_t) image->rows;
GetMagickPixelPacket(image,&target[0]);
image_view=AcquireVirtualCacheView(image,exception);
p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
if (p == (const PixelPacket *) NULL)
{
image_view=DestroyCacheView(image_view);
return(bounds);
}
SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
&target[0]);
GetMagickPixelPacket(image,&target[1]);
p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
exception);
if (p != (const PixelPacket *) NULL)
SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
&target[1]);
GetMagickPixelPacket(image,&target[2]);
p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
exception);
if (p != (const PixelPacket *) NULL)
SetMagickPixelPacket(image,p,GetCacheViewVirtualIndexQueue(image_view),
&target[2]);
status=MagickTrue;
GetMagickPixelPacket(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickPixelPacket
pixel;
RectangleInfo
bounding_box;
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
bounding_box=bounds;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewVirtualIndexQueue(image_view);
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetMagickPixelPacket(image,p,indexes+x,&pixel);
if ((x < bounding_box.x) &&
(IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
bounding_box.x=x;
if ((x > (ssize_t) bounding_box.width) &&
(IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse))
bounding_box.width=(size_t) x;
if ((y < bounding_box.y) &&
(IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse))
bounding_box.y=y;
if ((y > (ssize_t) bounding_box.height) &&
(IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse))
bounding_box.height=(size_t) y;
p++;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
{
if (bounding_box.x < bounds.x)
bounds.x=bounding_box.x;
if (bounding_box.y < bounds.y)
bounds.y=bounding_box.y;
if (bounding_box.width > bounds.width)
bounds.width=bounding_box.width;
if (bounding_box.height > bounds.height)
bounds.height=bounding_box.height;
}
}
image_view=DestroyCacheView(image_view);
if ((bounds.width == 0) || (bounds.height == 0))
(void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
"GeometryDoesNotContainImage","`%s'",image->filename);
else
{
bounds.width-=(bounds.x-1);
bounds.height-=(bounds.y-1);
}
return(bounds);
}
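/*
  Hypothetical usage sketch (illustrative only, not part of MagickCore):
  trim an image to its content bounding box.
*/
static Image *ExampleTrimToBoundingBox(Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    bounds;

  bounds=GetImageBoundingBox(image,exception);
  return(CropImage(image,&bounds,exception));
}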
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageChannelDepth() returns the depth of a particular image channel.
%
% The format of the GetImageChannelDepth method is:
%
% size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
% size_t GetImageChannelDepth(const Image *image,
% const ChannelType channel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
return(GetImageChannelDepth(image,CompositeChannels,exception));
}
MagickExport size_t GetImageChannelDepth(const Image *image,
const ChannelType channel,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
register ssize_t
i;
size_t
*current_depth,
depth,
number_threads;
ssize_t
y;
/*
Compute image depth.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
current_depth=(size_t *) AcquireQuantumMemory(number_threads,
sizeof(*current_depth));
if (current_depth == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
status=MagickTrue;
for (i=0; i < (ssize_t) number_threads; i++)
current_depth[i]=1;
if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse))
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
const int
id = GetOpenMPThreadId();
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0))
if (IsPixelAtDepth(image->colormap[i].red,range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0))
if (IsPixelAtDepth(image->colormap[i].green,range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0))
if (IsPixelAtDepth(image->colormap[i].blue,range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse))
break;
current_depth[id]++;
}
}
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
DisableMSCWarning(4127)
if (QuantumRange <= MaxMap)
RestoreMSCWarning
{
size_t
*depth_map;
/*
Compute the pixel depth (optimized with a lookup depth map).
*/
depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
{
unsigned int
depth;
for (depth=1; depth < MAGICKCORE_QUANTUM_DEPTH; depth++)
{
Quantum
pixel;
QuantumAny
range;
range=GetQuantumRange(depth);
pixel=(Quantum) i;
if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
break;
}
depth_map[i]=depth;
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
pixel;
if ((channel & RedChannel) != 0)
{
pixel=GetPixelRed(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if ((channel & GreenChannel) != 0)
{
pixel=GetPixelGreen(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if ((channel & BlueChannel) != 0)
{
pixel=GetPixelBlue(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
{
pixel=GetPixelOpacity(p);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
{
pixel=GetPixelIndex(indexes+x);
if (depth_map[ScaleQuantumToMap(pixel)] > current_depth[id])
current_depth[id]=depth_map[ScaleQuantumToMap(pixel)];
}
p++;
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
depth_map=(size_t *) RelinquishMagickMemory(depth_map);
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
#endif
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const IndexPacket
*restrict indexes;
register const PixelPacket
*restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
continue;
indexes=GetCacheViewVirtualIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
{
MagickBooleanType
atDepth;
QuantumAny
range;
atDepth=MagickTrue;
range=GetQuantumRange(current_depth[id]);
if ((atDepth != MagickFalse) && ((channel & RedChannel) != 0))
if (IsPixelAtDepth(GetPixelRed(p),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & GreenChannel) != 0))
if (IsPixelAtDepth(GetPixelGreen(p),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & BlueChannel) != 0))
if (IsPixelAtDepth(GetPixelBlue(p),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
if (IsPixelAtDepth(GetPixelOpacity(p),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse) && ((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
if (IsPixelAtDepth(GetPixelIndex(indexes+x),range) == MagickFalse)
atDepth=MagickFalse;
if ((atDepth != MagickFalse))
break;
current_depth[id]++;
}
p++;
}
if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
depth=current_depth[0];
for (i=1; i < (ssize_t) number_threads; i++)
if (depth < current_depth[i])
depth=current_depth[i];
current_depth=(size_t *) RelinquishMagickMemory(current_depth);
return(depth);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e Q u a n t u m D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageQuantumDepth() returns the depth of the image rounded to a legal
% quantum depth: 8, 16, 32, or 64.
%
% The format of the GetImageQuantumDepth method is:
%
% size_t GetImageQuantumDepth(const Image *image,
% const MagickBooleanType constrain)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o constrain: a value other than MagickFalse constrains the depth to
% a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
static inline double MagickMin(const double x,const double y)
{
if (x < y)
return(x);
return(y);
}
MagickExport size_t GetImageQuantumDepth(const Image *image,
const MagickBooleanType constrain)
{
size_t
depth;
depth=image->depth;
if (depth <= 8)
depth=8;
else
if (depth <= 16)
depth=16;
else
if (depth <= 32)
depth=32;
else
if (depth <= 64)
depth=64;
if (constrain != MagickFalse)
depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH);
return(depth);
}
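/* For example, an image depth of 12 is reported as 16; with constrain set
   and MAGICKCORE_QUANTUM_DEPTH == 8, the result is clamped to 8. */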
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageType() returns the potential type of image:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
%
% To ensure the image type matches its potential, use SetImageType():
%
% (void) SetImageType(image,GetImageType(image));
%
% The format of the GetImageType method is:
%
% ImageType GetImageType(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == CMYKColorspace)
{
if (image->matte == MagickFalse)
return(ColorSeparationType);
return(ColorSeparationMatteType);
}
if (IsMonochromeImage(image,exception) != MagickFalse)
return(BilevelType);
if (IsGrayImage(image,exception) != MagickFalse)
{
if (image->matte != MagickFalse)
return(GrayscaleMatteType);
return(GrayscaleType);
}
if (IsPaletteImage(image,exception) != MagickFalse)
{
if (image->matte != MagickFalse)
return(PaletteMatteType);
return(PaletteType);
}
if (image->matte != MagickFalse)
return(TrueColorMatteType);
return(TrueColorType);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s G r a y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsGrayImage() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities.
%
% The format of the IsGrayImage method is:
%
% MagickBooleanType IsGrayImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
(image->type == GrayscaleMatteType))
return(MagickTrue);
if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
(IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
return(MagickFalse);
type=BilevelType;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsGrayPixel(p) == MagickFalse)
{
type=UndefinedType;
break;
}
if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse))
type=GrayscaleType;
p++;
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if (type == UndefinedType)
return(MagickFalse);
((Image *) image)->colorspace=GRAYColorspace;
if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
return(MagickFalse);
((Image *) image)->type=type;
if ((type == GrayscaleType) && (image->matte != MagickFalse))
((Image *) image)->type=GrayscaleMatteType;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s M o n o c h r o m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsMonochromeImage() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange.
%
% The format of the IsMonochromeImage method is:
%
% MagickBooleanType IsMonochromeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsMonochromeImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
ImageType
type;
MagickBooleanType
status;
register ssize_t
x;
register const PixelPacket
*p;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->type == BilevelType)
return(MagickTrue);
if ((IsGrayColorspace(image->colorspace) == MagickFalse) &&
(IssRGBCompatibleColorspace(image->colorspace) == MagickFalse))
return(MagickFalse);
type=BilevelType;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (IsMonochromePixel(p) == MagickFalse)
{
type=UndefinedType;
break;
}
p++;
}
if (type == UndefinedType)
break;
}
image_view=DestroyCacheView(image_view);
if (type == UndefinedType)
return(MagickFalse);
((Image *) image)->colorspace=GRAYColorspace;
status=SyncImagePixelCache((Image *) image,exception);
if (status == MagickFalse)
return(MagickFalse);
((Image *) image)->type=type;
return(MagickTrue);
}
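/*
Illustrative usage sketch (not part of MagickCore): a hypothetical helper
that pairs IsMonochromeImage() with SetImageDepth() (defined later in this
file) so a black-and-white scan can be stored at 1 bit per sample.  The
prototype for SetImageDepth() is assumed to be in scope via the MagickCore
headers this file includes.
*/
static MagickBooleanType CompactBilevel(Image *image,ExceptionInfo *exception)
{
if (IsMonochromeImage(image,exception) == MagickFalse)
return(MagickFalse);
return(SetImageDepth(image,1));
}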
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s O p a q u e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsOpaqueImage() returns MagickTrue if none of the pixels in the image have
% an opacity value other than opaque (0).
%
% The format of the IsOpaqueImage method is:
%
% MagickBooleanType IsOpaqueImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsOpaqueImage(const Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
/*
Determine if image is opaque.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->matte == MagickFalse)
return(MagickTrue);
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (GetPixelOpacity(p) != OpaqueOpacity)
break;
p++;
}
if (x < (ssize_t) image->columns)
break;
}
image_view=DestroyCacheView(image_view);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
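/*
Illustrative usage sketch (not part of MagickCore): when IsOpaqueImage()
reports that every pixel is opaque, the matte channel carries no
information, so a hypothetical helper can switch it off before writing and
let coders emit smaller files.
*/
static void DropUselessMatte(Image *image,ExceptionInfo *exception)
{
if ((image->matte != MagickFalse) &&
(IsOpaqueImage(image,exception) != MagickFalse))
image->matte=MagickFalse;
}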
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l D e p t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageDepth() and SetImageChannelDepth() set the depth of the image or
% of a particular image channel.
%
% The format of the SetImageChannelDepth method is:
%
% MagickBooleanType SetImageDepth(Image *image,const size_t depth)
% MagickBooleanType SetImageChannelDepth(Image *image,
% const ChannelType channel,const size_t depth)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: the channel.
%
% o depth: the image depth.
%
*/
static inline Quantum ClampPixel(const MagickRealType value)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
return((Quantum) value);
#else
if (value < 0.0f)
return(0.0f);
if (value >= (MagickRealType) QuantumRange)
return((Quantum) QuantumRange);
return(value);
#endif
}
MagickExport MagickBooleanType SetImageDepth(Image *image,
const size_t depth)
{
return(SetImageChannelDepth(image,CompositeChannels,depth));
}
MagickExport MagickBooleanType SetImageChannelDepth(Image *image,
const ChannelType channel,const size_t depth)
{
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
QuantumAny
range;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
if (depth >= MAGICKCORE_QUANTUM_DEPTH)
{
image->depth=depth;
return(MagickTrue);
}
range=GetQuantumRange(depth);
if (image->storage_class == PseudoClass)
{
register ssize_t
i;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,1,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
if ((channel & RedChannel) != 0)
image->colormap[i].red=ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].red),range),range);
if ((channel & GreenChannel) != 0)
image->colormap[i].green=ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].green),range),range);
if ((channel & BlueChannel) != 0)
image->colormap[i].blue=ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].blue),range),range);
if ((channel & OpacityChannel) != 0)
image->colormap[i].opacity=ScaleAnyToQuantum(ScaleQuantumToAny(
ClampPixel(image->colormap[i].opacity),range),range);
}
}
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
DisableMSCWarning(4127)
if (QuantumRange <= MaxMap)
RestoreMSCWarning
{
Quantum
*depth_map;
register ssize_t
i;
/*
Scale pixels to the desired depth (optimized with a depth map).
*/
depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
if (depth_map == (Quantum *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i <= (ssize_t) MaxMap; i++)
depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,depth_map[ScaleQuantumToMap(GetPixelRed(q))]);
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,depth_map[ScaleQuantumToMap(GetPixelGreen(q))]);
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,depth_map[ScaleQuantumToMap(GetPixelBlue(q))]);
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,depth_map[ScaleQuantumToMap(GetPixelOpacity(q))]);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
#endif
/*
Scale pixels to desired depth.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register ssize_t
x;
register PixelPacket
*restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((channel & RedChannel) != 0)
SetPixelRed(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
GetPixelRed(q)),range),range));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
GetPixelGreen(q)),range),range));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
GetPixelBlue(q)),range),range));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelOpacity(q,ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel(
GetPixelOpacity(q)),range),range));
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
{
status=MagickFalse;
continue;
}
}
image_view=DestroyCacheView(image_view);
if (status != MagickFalse)
image->depth=depth;
return(status);
}
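/*
Illustrative usage sketch (not part of MagickCore): reduce an image to an
effective 8 bits per sample on all channels, then posterize only the red
channel further.  RedChannel is the standard ChannelType member; `image'
is assumed to come from the caller.  The new depth is recorded in
image->depth only when every row syncs successfully.
*/
static void DemoChannelDepth(Image *image)
{
(void) SetImageDepth(image,8);                   /* all channels */
(void) SetImageChannelDepth(image,RedChannel,4); /* red channel only */
}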
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% BilevelType, GrayscaleType, GrayscaleMatteType, PaletteType,
% PaletteBilevelMatteType, PaletteMatteType, TrueColorType,
% TrueColorMatteType, ColorSeparationType, ColorSeparationMatteType,
% OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
const char
*artifact;
ImageInfo
*image_info;
MagickBooleanType
status;
QuantizeInfo
*quantize_info;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
status=MagickTrue;
image_info=AcquireImageInfo();
image_info->dither=image->dither;
artifact=GetImageArtifact(image,"dither");
if (artifact != (const char *) NULL)
(void) SetImageOption(image_info,"dither",artifact);
switch (type)
{
case BilevelType:
{
if (IsGrayImage(image,&image->exception) == MagickFalse)
status=TransformImageColorspace(image,GRAYColorspace);
(void) NormalizeImage(image);
if (IsMonochromeImage(image,&image->exception) == MagickFalse)
{
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=2;
quantize_info->colorspace=GRAYColorspace;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
image->colors=2;
image->matte=MagickFalse;
break;
}
case GrayscaleType:
{
if (IsGrayImage(image,&image->exception) == MagickFalse)
status=TransformImageColorspace(image,GRAYColorspace);
image->matte=MagickFalse;
break;
}
case GrayscaleMatteType:
{
if (IsGrayImage(image,&image->exception) == MagickFalse)
status=TransformImageColorspace(image,GRAYColorspace);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
break;
}
case PaletteType:
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace);
if ((image->storage_class == DirectClass) || (image->colors > 256))
{
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=256;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
image->matte=MagickFalse;
break;
}
case PaletteBilevelMatteType:
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
(void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
quantize_info=AcquireQuantizeInfo(image_info);
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case PaletteMatteType:
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->colorspace=TransparentColorspace;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case TrueColorType:
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
image->matte=MagickFalse;
break;
}
case TrueColorMatteType:
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
break;
}
case ColorSeparationType:
{
if (image->colorspace != CMYKColorspace)
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace);
status=TransformImageColorspace(image,CMYKColorspace);
}
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
image->matte=MagickFalse;
break;
}
case ColorSeparationMatteType:
{
if (image->colorspace != CMYKColorspace)
{
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
status=TransformImageColorspace(image,sRGBColorspace);
status=TransformImageColorspace(image,CMYKColorspace);
}
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
break;
}
case OptimizeType:
case UndefinedType:
break;
}
image_info=DestroyImageInfo(image_info);
if (status == MagickFalse)
return(MagickFalse);
image->type=type;
return(MagickTrue);
}
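/*
Illustrative usage sketch (not part of MagickCore): force an image into a
256-color palette representation, as a GIF-style coder would, and report
failure to the caller.  SetImageType() quantizes only when the current
pixels do not already satisfy the requested type.
*/
static MagickBooleanType DemoPaletteType(Image *image)
{
if (SetImageType(image,PaletteType) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}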
|
search_engine.c | #include<omp.h>
#include<stdio.h>
#include<stdlib.h>
#include<dirent.h>
#include<string.h>
#include<unistd.h>
#include<stdbool.h>
#define MAX_FILE_COUNT 10
#define MAX_FILE_NAME_LENGTH 64 // room for the "corpus/" prefix plus an entry name
#define MAX_WORD_LENGTH 20
#define MAX_NUM_THREADS 4
#define MAX_SENTENCE_LENGTH 100
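/*
 * Build-and-run sketch (assumes text files live under ./corpus/):
 *
 *   gcc -fopenmp search_engine.c -o search_engine
 *   OMP_NUM_THREADS=4 ./search_engine
 *
 * Thread 0 acts as the master that distributes files round-robin; threads
 * 1..N-1 each scan their assigned files for the queried word.
 */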
int main()
{
// Get all the names of the files in the corpus directory.
struct dirent *de;
DIR *dir = opendir("./corpus/");
char *file_names[MAX_FILE_COUNT];
int file_count = 0;
if (dir == NULL)
{
perror("opendir(./corpus/) failed");
return EXIT_FAILURE;
}
while ((de = readdir(dir)) != NULL)
{
// Skip the "." and ".." entries instead of assuming they come first.
if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
{
continue;
}
if (file_count == MAX_FILE_COUNT)
{
break;
}
// Copy the name: the dirent buffer is reused by readdir() and freed by closedir().
file_names[file_count++] = strdup(de->d_name);
}
printf("----> There are a total of %d files to be read.\n", file_count);
closedir(dir);
char word[MAX_WORD_LENGTH];
printf("Enter the word to be searched : ");
scanf("%s", word);
int work[MAX_NUM_THREADS][100];
int work_count[MAX_NUM_THREADS];
memset(work, 0, sizeof(work));
memset(work_count, 0, sizeof(work_count));
bool flag = false;
#pragma omp parallel shared(work, work_count, flag)
{
#pragma omp master
{
//Allocate work to other worker threads.
int num_threads = omp_get_num_threads();
if (num_threads > MAX_NUM_THREADS) {
num_threads = MAX_NUM_THREADS; // work[] and work_count[] only have MAX_NUM_THREADS rows
}
printf("----> Number of available threads : %d.\n", num_threads);
int thread_num = 1;
for (int i=0; i<file_count; i++) {
work[thread_num][work_count[thread_num]++] = i;
thread_num ++;
// Wrap after the last worker; workers are threads 1..num_threads-1.
if (thread_num == num_threads) {
thread_num = 1;
}
}
}
#pragma omp barrier
int thread_num = omp_get_thread_num();
// Only worker threads with an allocated work row participate.
if (thread_num > 0 && thread_num < MAX_NUM_THREADS) {
printf("----> Thread : %d has %d files to read.\n", thread_num, work_count[thread_num]);
for (int i=0; i<work_count[thread_num]; i++) {
printf("----> Thread : %d is reading file : %s\n", thread_num, file_names[work[thread_num][i]]);
// Read contents from file and search
FILE *filePointer;
char cur_file_name[MAX_FILE_NAME_LENGTH];
// Build "corpus/<name>"; snprintf truncates rather than overflowing the buffer.
snprintf(cur_file_name, sizeof(cur_file_name), "corpus/%s", file_names[work[thread_num][i]]);
filePointer = fopen(cur_file_name, "r");
if (filePointer == NULL) {
fprintf(stderr, "Could not open %s.\n", cur_file_name);
continue;
}
char cur_sentence[MAX_SENTENCE_LENGTH];
while (fgets(cur_sentence, MAX_SENTENCE_LENGTH, filePointer) != NULL) {
if (strstr(cur_sentence, word) != NULL) {
printf("A match has been found in %s.\n", file_names[work[thread_num][i]]);
#pragma omp atomic write
flag = true;
break;
}
}
fclose(filePointer);
}
}
}
if (flag == false) {
printf("There was no match found for the word in any of the servers.\n");
}
} |
forcing.c | #include "pihm.h"
#if defined(_RT_)
void ApplyBc(int t, const rttbl_struct *rttbl, forc_struct *forc, elem_struct elem[], river_struct river[])
#else
void ApplyBc(int t, forc_struct *forc, elem_struct elem[], river_struct river[])
#endif
{
// Element boundary conditions
if (forc->nbc > 0)
{
#if defined(_RT_)
ApplyElemBc(t, rttbl, forc, elem);
#else
ApplyElemBc(t, forc, elem);
#endif
}
// River boundary conditions
if (forc->nriverbc > 0)
{
ApplyRiverBc(t, forc, river);
}
}
#if defined(_RT_)
void ApplyForcing(int t, int rad_mode, const siteinfo_struct *siteinfo, const rttbl_struct *rttbl, forc_struct *forc,
elem_struct elem[])
#elif defined(_NOAH_)
void ApplyForcing(int t, int rad_mode, const siteinfo_struct *siteinfo, forc_struct *forc, elem_struct elem[])
#else
void ApplyForcing(int t, forc_struct *forc, elem_struct elem[])
#endif
{
// Meteorological forcing
#if defined(_CYCLES_)
ApplyDailyMeteoForcing(t, rad_mode, siteinfo, forc, elem);
#elif defined(_NOAH_)
ApplyMeteoForcing(t, rad_mode, siteinfo, forc, elem);
#else
ApplyMeteoForcing(t, forc, elem);
#endif
// LAI forcing
#if defined(_BGC_) || defined(_CYCLES_)
ApplyLai(elem);
#else
ApplyLai(t, forc, elem);
#endif
#if defined(_RT_)
// Precipitation solute concentration
ApplyPrcpConc(t, rttbl, forc, elem);
#endif
}
#if defined(_RT_)
void ApplyElemBc(int t, const rttbl_struct *rttbl, forc_struct *forc, elem_struct elem[])
#else
void ApplyElemBc(int t, forc_struct *forc, elem_struct elem[])
#endif
{
int i, k;
#if defined(_OPENMP)
# pragma omp parallel for
#endif
for (k = 0; k < forc->nbc; k++)
{
#if defined(_RT_)
IntrplForcing(t, 1 + rttbl->num_stc, INTRPL, &forc->bc[k]);
#else
IntrplForcing(t, 1, INTRPL, &forc->bc[k]);
#endif
}
#if defined(_OPENMP)
# pragma omp parallel for
#endif
for (i = 0; i < nelem; i++)
{
int ind;
int j;
#if defined(_RT_)
int k;
#endif
for (j = 0; j < NUM_EDGE; j++)
{
if (elem[i].attrib.bc[j] > 0)
{
ind = elem[i].attrib.bc[j] - 1;
elem[i].bc.head[j] = forc->bc[ind].value[0];
#if defined(_RT_)
for (k = 0; k < rttbl->num_stc; k++)
{
elem[i].bc.conc[j][k] = forc->bc[ind].value[k + 1];
}
#endif
}
else if (elem[i].attrib.bc[j] < 0)
{
ind = -elem[i].attrib.bc[j] - 1;
elem[i].bc.flux[j] = forc->bc[ind].value[0];
#if defined(_RT_)
for (k = 0; k < rttbl->num_stc; k++)
{
elem[i].bc.conc[j][k] = forc->bc[ind].value[k + 1];
}
#endif
}
#if defined(_DGW_)
if (elem[i].attrib.bc_geol[j] > 0)
{
// Dirichlet type boundary conditions
ind = elem[i].attrib.bc_geol[j] - 1;
elem[i].bc_geol.head[j] = forc->bc[ind].value[0];
# if defined(_RT_)
for (k = 0; k < rttbl->num_stc; k++)
{
elem[i].bc_geol.conc[j][k] = forc->bc[ind].value[k + 1];
}
# endif
}
else if (elem[i].attrib.bc_geol[j] < 0)
{
// Neumann type boundary conditions
ind = -elem[i].attrib.bc_geol[j] - 1;
elem[i].bc_geol.flux[j] = forc->bc[ind].value[0];
# if defined(_RT_)
for (k = 0; k < rttbl->num_stc; k++)
{
elem[i].bc_geol.conc[j][k] = forc->bc[ind].value[k + 1];
}
# endif
}
#endif
}
}
}
#if defined(_NOAH_)
void ApplyMeteoForcing(int t, int rad_mode, const siteinfo_struct *siteinfo, forc_struct *forc, elem_struct elem[])
#else
void ApplyMeteoForcing(int t, forc_struct *forc, elem_struct elem[])
#endif
{
int i, k;
#if defined(_NOAH_)
spa_data spa;
#endif
// Meteorological forcing for PIHM
#if defined(_OPENMP)
# pragma omp parallel for
#endif
for (k = 0; k < forc->nmeteo; k++)
{
IntrplForcing(t, NUM_METEO_VAR, INTRPL, &forc->meteo[k]);
}
#if defined(_NOAH_)
// Topographic radiation for Noah
if (rad_mode == TOPO_SOL)
{
# if defined(_OPENMP)
# pragma omp parallel for
# endif
for (k = 0; k < forc->nrad; k++)
{
IntrplForcing(t, 2, INTRPL, &forc->rad[k]);
}
// Calculate Sun position for topographic solar radiation
SunPos(t, siteinfo, &spa);
}
#endif
#if defined(_OPENMP)
# pragma omp parallel for
#endif
for (i = 0; i < nelem; i++)
{
int ind;
ind = elem[i].attrib.meteo - 1;
elem[i].wf.prcp = forc->meteo[ind].value[PRCP_TS] / 1000.0;
elem[i].es.sfctmp = forc->meteo[ind].value[SFCTMP_TS];
elem[i].ps.rh = forc->meteo[ind].value[RH_TS];
elem[i].ps.sfcspd = forc->meteo[ind].value[SFCSPD_TS];
elem[i].ef.soldn = forc->meteo[ind].value[SOLAR_TS];
elem[i].ef.soldn = MAX(elem[i].ef.soldn, 0.0);
#if defined(_NOAH_)
elem[i].ef.longwave = forc->meteo[ind].value[LONGWAVE_TS];
#endif
elem[i].ps.sfcprs = forc->meteo[ind].value[PRES_TS];
#if defined(_NOAH_)
// Calculate solar radiation
if (rad_mode == TOPO_SOL)
{
elem[i].ef.soldir = forc->rad[ind].value[SOLDIR_TS];
elem[i].ef.soldif = forc->rad[ind].value[SOLDIF_TS];
elem[i].ef.soldn =
TopoRadn(elem[i].ef.soldir, elem[i].ef.soldif, spa.zenith, spa.azimuth180, &elem[i].topo);
elem[i].ef.soldn = MAX(elem[i].ef.soldn, 0.0);
}
#endif
}
}
#if defined(_CYCLES_)
void ApplyDailyMeteoForcing(int t, int rad_mode, const siteinfo_struct *siteinfo, forc_struct *forc, elem_struct elem[])
{
int i, k, kt;
spa_data spa;
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i < nelem; i++)
{
elem[i].wf.prcp = 0.0;
elem[i].es.sfctmp = 0.0;
elem[i].ps.rh = 0.0;
elem[i].ps.sfcspd = 0.0;
elem[i].ef.soldn = 0.0;
elem[i].ef.longwave = 0.0;
elem[i].ps.sfcprs = 0.0;
elem[i].weather.tmp_max = -DBL_MAX;
elem[i].weather.tmp_min = DBL_MAX;
elem[i].weather.rh_min = DBL_MAX;
}
for (kt = t; kt < t + DAYINSEC; kt += 3600)
{
#if defined(_OPENMP)
#pragma omp parallel for
#endif
// Meteorological forcing for PIHM
for (k = 0; k < forc->nmeteo; k++)
{
IntrplForcing(kt, NUM_METEO_VAR, INTRPL, &forc->meteo[k]);
}
// Topographic radiation for Noah
if (rad_mode == TOPO_SOL)
{
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (k = 0; k < forc->nrad; k++)
{
IntrplForcing(kt, 2, INTRPL, &forc->rad[k]);
}
// Calculate Sun position for topographic solar radiation
SunPos(kt, siteinfo, &spa);
}
#if defined(_OPENMP)
#pragma omp parallel for
#endif
for (i = 0; i < nelem; i++)
{
int ind;
ind = elem[i].attrib.meteo - 1;
// Calculate solar radiation
if (rad_mode == TOPO_SOL)
{
elem[i].ef.soldir = forc->rad[ind].value[SOLDIR_TS];
elem[i].ef.soldif = forc->rad[ind].value[SOLDIF_TS];
}
elem[i].wf.prcp += forc->meteo[ind].value[PRCP_TS] * 1.0E-3 / 24.0;
elem[i].es.sfctmp += forc->meteo[ind].value[SFCTMP_TS] / 24.0;
elem[i].ps.rh += forc->meteo[ind].value[RH_TS] / 24.0;
elem[i].ps.sfcspd += forc->meteo[ind].value[SFCSPD_TS] / 24.0;
elem[i].ef.soldn += (rad_mode > 0 && forc->nrad > 0) ?
MAX(TopoRadn(elem[i].ef.soldir, elem[i].ef.soldif, spa.zenith, spa.azimuth180, &elem[i].topo), 0.0) /
24.0 : MAX(forc->meteo[ind].value[SOLAR_TS], 0.0) / 24.0;
elem[i].ef.longwave += forc->meteo[ind].value[LONGWAVE_TS] / 24.0;
elem[i].ps.sfcprs += forc->meteo[ind].value[PRES_TS] / 24.0;
elem[i].weather.tmp_max = MAX(elem[i].weather.tmp_max, forc->meteo[ind].value[SFCTMP_TS] - TFREEZ);
elem[i].weather.tmp_min = MIN(elem[i].weather.tmp_min, forc->meteo[ind].value[SFCTMP_TS] - TFREEZ);
elem[i].weather.rh_min = MIN(elem[i].weather.rh_min, forc->meteo[ind].value[RH_TS]);
elem[i].weather.wind = elem[i].ps.sfcspd;
elem[i].weather.solar_rad = elem[i].ef.soldn * DAYINSEC * 1.0E-6;
elem[i].weather.atm_pres = elem[i].ps.sfcprs * 1.0E-3;
}
}
}
#endif
#if defined(_BGC_) || defined(_CYCLES_)
void ApplyLai(elem_struct elem[])
#else
void ApplyLai(int t, forc_struct *forc, elem_struct elem[])
#endif
{
int i;
#if defined(_CYCLES_)
// Cycles coupling
# if defined(_OPENMP)
# pragma omp parallel for
# endif
for (i = 0; i < nelem; i++)
{
const double KSOLAR = 0.5;
double tau;
if (CommRadIntcp(elem[i].crop) > 0.0)
{
tau = 1.0 - MIN(CommRadIntcp(elem[i].crop), 0.98);
elem[i].ps.proj_lai = -log(tau) / KSOLAR;
}
else
{
elem[i].ps.proj_lai = 0.0;
}
}
#elif defined(_BGC_)
// BGC coupling
# if defined(_OPENMP)
# pragma omp parallel for
# endif
for (i = 0; i < nelem; i++)
{
elem[i].ps.proj_lai = elem[i].cs.leafc * elem[i].epc.avg_proj_sla;
}
#else
// Use LAI forcing
int k;
if (forc->nlai > 0)
{
#if defined(_OPENMP)
# pragma omp parallel for
#endif
for (k = 0; k < forc->nlai; k++)
{
IntrplForcing(t, 1, INTRPL, &forc->lai[k]);
}
}
# if defined(_OPENMP)
# pragma omp parallel for
# endif
for (i = 0; i < nelem; i++)
{
int ind;
if (elem[i].attrib.lai > 0)
{
ind = elem[i].attrib.lai - 1;
elem[i].ps.proj_lai = forc->lai[ind].value[0];
}
else
{
elem[i].ps.proj_lai = MonthlyLai(t, elem[i].attrib.lc);
}
}
#endif
}
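/*
 * Worked example of the Beer's-law inversion in the Cycles branch above
 * (illustrative, not part of PIHM): if the crop community intercepts 60% of
 * incoming radiation, the transmitted fraction is tau = 1 - 0.6 = 0.4, and
 * the projected LAI recovered from tau = exp(-KSOLAR * LAI) is
 *
 *   LAI = -ln(0.4) / 0.5 = 0.9163 / 0.5 ~= 1.83
 *
 * The MIN(..., 0.98) guard keeps tau away from zero so the logarithm stays
 * finite even for near-total interception.
 */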
#if defined(_RT_)
void ApplyPrcpConc(int t, const rttbl_struct *rttbl, forc_struct *forc, elem_struct elem[])
{
int i, j;
if (forc->prcp_flag == 2)
{
#if defined(_OPENMP)
# pragma omp parallel for
#endif
for (j = 0; j < forc->nprcpc; j++)
{
IntrplForcing(t, rttbl->num_spc, NO_INTRPL, &forc->prcpc[j]);
}
#if defined(_OPENMP)
# pragma omp parallel for
#endif
for (i = 0; i < nelem; i++)
{
int k;
int ind;
ind = elem[i].attrib.prcp_conc - 1;
for (k = 0; k < rttbl->num_spc; k++)
{
elem[i].prcpchm.tot_conc[k] = forc->prcpc[ind].value[k];
}
}
}
else
{
#if defined(_OPENMP)
# pragma omp parallel for
#endif
for (i = 0; i < nelem; i++)
{
int k;
for (k = 0; k < rttbl->num_spc; k++)
{
elem[i].prcpchm.tot_conc[k] = (forc->prcp_flag == 1) ? rttbl->prcp_conc[k] : 0.0;
}
}
}
}
#endif
void ApplyRiverBc(int t, forc_struct *forc, river_struct river[])
{
int i, k;
#if defined(_OPENMP)
# pragma omp parallel for
#endif
for (k = 0; k < forc->nriverbc; k++)
{
IntrplForcing(t, 1, INTRPL, &forc->riverbc[k]);
}
#if defined(_OPENMP)
# pragma omp parallel for
#endif
for (i = 0; i < nriver; i++)
{
int ind;
if (river[i].attrib.riverbc_type > 0)
{
ind = river[i].attrib.riverbc_type - 1;
river[i].bc.head = forc->riverbc[ind].value[0];
}
else if (river[i].attrib.riverbc_type < 0)
{
ind = -river[i].attrib.riverbc_type - 1;
river[i].bc.flux = forc->riverbc[ind].value[0];
}
}
}
// Locate the forcing record bracketing time t with a binary search, then either
// linearly interpolate between the two records (intrpl != 0) or use the earlier
// record as-is.
void IntrplForcing(int t, int nvrbl, int intrpl, tsdata_struct *ts)
{
int j;
int first, middle, last;
if (t < ts->ftime[0])
{
pihm_printf(VL_ERROR, "Error: current time step is before the first record in the forcing file.\n"
"Please check your forcing file.\n");
pihm_exit(EXIT_FAILURE);
}
else if (t > ts->ftime[ts->length - 1])
{
pihm_printf(VL_ERROR, "Error: current time step is after the last record in the forcing file.\n"
"Please check your forcing file.\n");
pihm_exit(EXIT_FAILURE);
}
first = 1;
last = ts->length - 1;
while (first <= last)
{
middle = (first + last) / 2;
if (t >= ts->ftime[middle - 1] && t < ts->ftime[middle])
{
for (j = 0; j < nvrbl; j++)
{
ts->value[j] = (intrpl) ?
((double)(ts->ftime[middle] - t) * ts->data[middle - 1][j] +
(double)(t - ts->ftime[middle - 1]) * ts->data[middle][j]) /
(double)(ts->ftime[middle] - ts->ftime[middle - 1]) : ts->data[middle - 1][j];
}
break;
}
else if (ts->ftime[middle] > t)
{
last = middle - 1;
}
else
{
first = middle + 1;
}
}
}
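/*
 * Worked example of the interpolation above (illustrative): with
 * ftime[middle-1] = 0, ftime[middle] = 3600, and data values 10.0 and 20.0,
 * a query at t = 900 yields
 *
 *   ((3600 - 900) * 10.0 + (900 - 0) * 20.0) / 3600 = 12.5,
 *
 * i.e. one quarter of the way between the two records.  With intrpl == 0
 * (NO_INTRPL) the earlier record's value, 10.0, is used unchanged.
 */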
double MonthlyLai(int t, int lc)
{
// Monthly LAI data come from WRF MPTABLE.TBL for Noah MODIS land cover categories
pihm_t_struct pihm_time;
double lai_tbl[40][12] = {
// Evergreen Needleleaf Forest
{4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0, 4.0},
// Evergreen Broadleaf Forest
{4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 4.5, 4.5},
// Deciduous Needleleaf Forest
{0.0, 0.0, 0.0, 0.6, 1.2, 2.0, 2.6, 1.7, 1.0, 0.5, 0.2, 0.0},
// Deciduous Broadleaf Forest
{0.0, 0.0, 0.3, 1.2, 3.0, 4.7, 4.5, 3.4, 1.2, 0.3, 0.0, 0.0},
// Mixed Forest
{2.0, 2.0, 2.2, 2.6, 3.5, 4.3, 4.3, 3.7, 2.6, 2.2, 2.0, 2.0},
// Closed Shrubland
{0.0, 0.0, 0.3, 0.9, 2.2, 3.5, 3.5, 2.5, 0.9, 0.3, 0.0, 0.0},
// Open Shrubland
{0.0, 0.0, 0.2, 0.6, 1.5, 2.3, 2.3, 1.7, 0.6, 0.2, 0.0, 0.0},
// Woody Savanna
{0.2, 0.2, 0.4, 1.0, 2.4, 4.1, 4.1, 2.7, 1.0, 0.4, 0.2, 0.2},
// Savanna
{0.3, 0.3, 0.5, 0.8, 1.8, 3.6, 3.8, 2.1, 0.9, 0.5, 0.3, 0.3},
// Grassland
{0.4, 0.5, 0.6, 0.7, 1.2, 3.0, 3.5, 1.5, 0.7, 0.6, 0.5, 0.4},
// Permanent Wetland
{0.2, 0.3, 0.3, 0.5, 1.5, 2.9, 3.5, 2.7, 1.2, 0.3, 0.3, 0.2},
// Cropland
{0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 1.5, 0.0, 0.0, 0.0},
// Urban and Built-Up
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0},
// Cropland/Natural Veg. Mosaic
{0.2, 0.3, 0.3, 0.4, 1.1, 2.5, 3.2, 2.2, 1.1, 0.3, 0.3, 0.2},
// Permanent Snow
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0},
// Barren/Sparsely Vegetated
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0},
// IGBP Water
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0},
// Unclassified
{999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999},
// Fill Value
{999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999},
// Unclassified
{999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999, 999},
// Open Water
{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0},
// Perennial Ice/Snow
{1.06, 1.11, 1.20, 1.29, 1.45, 1.67, 1.71, 1.63, 1.45, 1.27, 1.13, 1.06},
// Developed Open Space
{0.83, 0.94, 1.06, 1.18, 1.86, 3.72, 4.81, 4.26, 2.09, 1.29, 1.06, 0.94},
// Developed Low Intensity
{0.96, 1.07, 1.20, 1.35, 2.04, 3.83, 4.87, 4.35, 2.30, 1.46, 1.18, 1.06},
// Developed Medium Intensity
{1.11, 1.22, 1.36, 1.54, 2.26, 3.97, 4.94, 4.46, 2.55, 1.66, 1.34, 1.20},
// Developed High Intensity
{1.24, 1.34, 1.50, 1.71, 2.45, 4.09, 5.00, 4.54, 2.76, 1.82, 1.47, 1.32},
// Barren Land
{0.03, 0.03, 0.03, 0.02, 0.02, 0.03, 0.04, 0.06, 0.09, 0.06, 0.04, 0.03},
// Deciduous Forest
{0.62, 0.67, 0.92, 1.71, 3.42, 5.53, 6.22, 5.60, 3.83, 1.79, 0.92, 0.67},
// Evergreen Forest
{3.38, 3.43, 3.47, 3.52, 3.78, 4.54, 4.98, 4.76, 3.87, 3.56, 3.47, 3.43},
// Mixed Forest
{3.10, 3.26, 3.61, 4.11, 5.17, 6.73, 7.21, 6.71, 5.34, 4.09, 3.41, 3.14},
// Dwarf Scrub
{0.24, 0.24, 0.19, 0.13, 0.15, 0.20, 0.26, 0.48, 0.70, 0.48, 0.30, 0.24},
// Shrub/Scrub
{0.35, 0.38, 0.38, 0.38, 0.55, 1.06, 1.53, 1.53, 1.04, 0.58, 0.44, 0.38},
// Grassland/Herbaceous
{0.70, 0.80, 0.90, 1.00, 1.60, 3.30, 4.30, 3.80, 1.80, 1.10, 0.90, 0.80},
// Sedge/Herbaceous
{0.70, 0.80, 0.90, 1.00, 1.60, 3.30, 4.30, 3.80, 1.80, 1.10, 0.90, 0.80},
// Lichens
{0.70, 0.80, 0.90, 1.00, 1.60, 3.30, 4.30, 3.80, 1.80, 1.10, 0.90, 0.80},
// Moss
{0.70, 0.80, 0.90, 1.00, 1.60, 3.30, 4.30, 3.80, 1.80, 1.10, 0.90, 0.80},
// Pasture/Hay
{0.47, 0.54, 0.60, 0.67, 1.07, 2.20, 2.87, 2.54, 1.20, 0.74, 0.60, 0.54},
// Cultivated Crops
{0.47, 0.54, 0.60, 0.67, 1.07, 2.20, 2.87, 2.54, 1.20, 0.74, 0.60, 0.54},
// Woody Wetland
{0.35, 0.38, 0.38, 0.38, 0.55, 1.06, 1.53, 1.53, 1.04, 0.58, 0.44, 0.38},
// Emergent Herbaceous Wetland
{0.24, 0.24, 0.19, 0.13, 0.15, 0.20, 0.26, 0.48, 0.70, 0.48, 0.30, 0.24}
};
pihm_time = PIHMTime(t);
return lai_tbl[lc - 1][pihm_time.month - 1];
}
double MonthlyRl(int t, int lc)
{
// Monthly roughness length data are calculated using monthly LAI data above with max/min LAI and max/min roughness
// length data in the vegprmt.tbl
pihm_t_struct pihm_time;
double rl_tbl[40][12] = {
// Evergreen Needleleaf Forest
{0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500},
// Evergreen Broadleaf Forest
{0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500},
// Deciduous Needleleaf Forest
{0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500},
// Deciduous Broadleaf Forest
{0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500},
// Mixed Forest
{0.200, 0.200, 0.200, 0.200, 0.278, 0.367, 0.367, 0.300, 0.200, 0.200, 0.200, 0.200},
// Closed Shrubland
{0.010, 0.010, 0.010, 0.015, 0.032, 0.048, 0.048, 0.035, 0.015, 0.010, 0.010, 0.010},
// Open Shrubland
{0.010, 0.010, 0.010, 0.010, 0.033, 0.053, 0.053, 0.038, 0.010, 0.010, 0.010, 0.010},
// Woody Savanna
{0.010, 0.010, 0.010, 0.016, 0.034, 0.050, 0.050, 0.038, 0.016, 0.010, 0.010, 0.010},
// Savanna
{0.150, 0.150, 0.150, 0.150, 0.150, 0.150, 0.150, 0.150, 0.150, 0.150, 0.150, 0.150},
// Grassland
{0.100, 0.100, 0.101, 0.102, 0.106, 0.120, 0.120, 0.108, 0.102, 0.101, 0.100, 0.100},
// Permanent Wetland
{0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000},
// Cropland
{0.050, 0.050, 0.050, 0.050, 0.050, 0.061, 0.085, 0.085, 0.050, 0.050, 0.050, 0.050},
// Urban and Built-Up
{0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500},
// Cropland/Natural Veg. Mosaic
{0.050, 0.050, 0.050, 0.050, 0.050, 0.059, 0.091, 0.050, 0.050, 0.050, 0.050, 0.050},
// Permanent Snow
{0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001},
// Barren/Sparsely Vegetated
{0.010, 0.010, 0.010, 0.010, 0.010, 0.010, 0.010, 0.010, 0.010, 0.010, 0.010, 0.010},
// IGBP Water
{0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001, 0.0001},
// Unclassified
{0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300, 0.300},
// Fill Value
{0.150, 0.150, 0.150, 0.150, 0.150, 0.150, 0.150, 0.150, 0.150, 0.150, 0.150, 0.150},
// Unclassified
{0.050, 0.050, 0.050, 0.050, 0.050, 0.050, 0.050, 0.050, 0.050, 0.050, 0.050, 0.050},
// Open Water
{0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000},
// Perennial Ice/Snow
{0.152, 0.151, 0.155, 0.165, 0.169, 0.169, 0.169, 0.170, 0.170, 0.166, 0.157, 0.152},
// Developed Open Space
{0.089, 0.089, 0.091, 0.093, 0.095, 0.094, 0.093, 0.094, 0.095, 0.094, 0.091, 0.089},
// Developed Low Intensity
{0.119, 0.119, 0.123, 0.132, 0.137, 0.136, 0.135, 0.136, 0.137, 0.133, 0.124, 0.119},
// Developed Medium Intensity
{0.154, 0.153, 0.163, 0.179, 0.187, 0.187, 0.186, 0.187, 0.188, 0.180, 0.163, 0.154},
// Developed High Intensity
{0.183, 0.183, 0.195, 0.218, 0.229, 0.229, 0.228, 0.229, 0.230, 0.220, 0.196, 0.183},
// Barren Land
{0.013, 0.013, 0.013, 0.013, 0.013, 0.013, 0.013, 0.013, 0.014, 0.014, 0.013, 0.013},
// Deciduous Forest
{0.343, 0.343, 0.431, 0.577, 0.650, 0.657, 0.656, 0.653, 0.653, 0.581, 0.431, 0.343},
// Evergreen Forest
{1.623, 1.623, 1.623, 1.623, 1.623, 1.623, 1.622, 1.622, 1.623, 1.623, 1.623, 1.623},
// Mixed Forest
{0.521, 0.518, 0.557, 0.629, 0.663, 0.664, 0.665, 0.665, 0.667, 0.633, 0.562, 0.521},
// Dwarf Scrub
{0.022, 0.022, 0.021, 0.020, 0.020, 0.020, 0.020, 0.023, 0.025, 0.024, 0.023, 0.022},
// Shrub/Scrub
{0.034, 0.034, 0.033, 0.033, 0.033, 0.032, 0.032, 0.034, 0.035, 0.035, 0.034, 0.034},
// Grassland/Herbaceous
{0.070, 0.070, 0.070, 0.070, 0.070, 0.069, 0.068, 0.069, 0.070, 0.070, 0.070, 0.070},
// Sedge/Herbaceous
{0.070, 0.070, 0.070, 0.070, 0.070, 0.069, 0.068, 0.069, 0.070, 0.070, 0.070, 0.070},
// Lichens
{0.070, 0.070, 0.070, 0.070, 0.070, 0.069, 0.068, 0.069, 0.070, 0.070, 0.070, 0.070},
// Moss
{0.070, 0.070, 0.070, 0.070, 0.070, 0.069, 0.068, 0.069, 0.070, 0.070, 0.070, 0.070},
// Pasture/Hay
{0.047, 0.047, 0.047, 0.047, 0.047, 0.046, 0.046, 0.046, 0.047, 0.047, 0.047, 0.047},
// Cultivated Crops
{0.047, 0.047, 0.047, 0.047, 0.047, 0.046, 0.046, 0.046, 0.047, 0.047, 0.047, 0.047},
// Woody Wetland
{0.038, 0.038, 0.038, 0.037, 0.037, 0.037, 0.037, 0.039, 0.040, 0.039, 0.039, 0.038},
// Emergent Herbaceous Wetland
{0.027, 0.027, 0.026, 0.024, 0.024, 0.024, 0.024, 0.028, 0.029, 0.029, 0.027, 0.027}
};
pihm_time = PIHMTime(t);
return rl_tbl[lc - 1][pihm_time.month - 1];
}
double MonthlyMf(int t)
{
pihm_t_struct pihm_time;
double mf_tbl[12] = {
0.001308019, 0.001633298, 0.002131198, 0.002632776, 0.003031171, 0.003197325, 0.003095839, 0.002745240,
0.002260213, 0.001759481, 0.001373646, 0.001202083
};
pihm_time = PIHMTime(t);
return mf_tbl[pihm_time.month - 1];
}
|